mirror of https://github.com/ivanch/tcc.git
156 lines
5.1 KiB
Python
156 lines
5.1 KiB
Python
import requests
|
|
import docker
|
|
import concurrent.futures
|
|
import time
|
|
import sys
|
|
import os
|
|
from graph import generate_req_graph, generate_resource_graph
|
|
from math import floor
|
|
from init import init
|
|
|
|
# Require a framework name and optionally a container name; -h/--help prints usage.
# (Bounds check runs first, so sys.argv[1] is never touched when it is absent.)
if not (2 <= len(sys.argv) <= 3) or sys.argv[1] in ('-h', '--help'):
    print("Usage: python testes.py <framework name> [container name]")
    sys.exit(1)
|
|
|
|
init()

# Load-test configuration.
THREADS = 10                                 # worker threads per batch
FRAMEWORK_NAME = sys.argv[1]                 # label used in output file names
CONTAINER_NAME = sys.argv[2] if len(sys.argv) > 2 else ""  # docker container to sample
URL_BASE = 'http://localhost:9080'
BLUR_RADIUS = 5


def _read_bytes(path):
    """Read a file fully as bytes, closing the handle.

    The original used `open(path, 'rb').read()` inline, which leaks the
    file handle (never closed) at import time.
    """
    with open(path, 'rb') as f:
        return f.read()


# One entry per test: (endpoint, HTTP method, request-count schedule, request body or None).
API_REQUESTS = [
    ('/image/save-big-image', 'POST', range(0, 30_000, 50), _read_bytes('big-image.png')),
    (f'/image/blur?radius={BLUR_RADIUS}', 'POST', range(0, 1_000, 50), _read_bytes('small-image.png')),
    ('/status/ok', 'GET', range(0, 30_000, 5000), None),
    ('/image/load-image', 'GET', range(0, 30_000, 5000), None),
    ('/static/simpleimage.png', 'GET', range(0, 30_000, 5000), None),
    ('/image/load-big-image', 'GET', range(0, 1_000, 200), None),
    ('/static/bigimage.png', 'GET', range(0, 1_000, 200), None),
    ('/static/video.mp4', 'GET', range(0, 10_000, 1_000), None),
]
|
|
|
|
def send_request(url, method = 'GET', payload = None):
    """Issue one HTTP request, retrying until a 200 response arrives.

    Args:
        url: full URL to hit.
        method: 'GET' or 'POST'.
        payload: raw request body for POST (sent as image/png).

    Returns:
        Dict mapping status class (2, 4, 5) to the number of responses seen
        in that class across all attempts.
    """
    responses = {
        2: 0,  # 2xx OK
        4: 0,  # 4xx client error
        5: 0,  # 5xx server error
    }

    while True:
        try:
            if method == 'GET':
                response = requests.get(url)
            elif method == 'POST':
                response = requests.post(url, data=payload,
                                         headers={'Content-Type': 'image/png'})
            else:
                # Original left `response` as None here and crashed with an
                # opaque AttributeError below; fail loudly instead.
                raise ValueError(f"unsupported HTTP method: {method}")
        except requests.RequestException:
            # Transient network failure: retry. Narrowed from a bare
            # `except:` so Ctrl-C / SystemExit still propagate.
            continue

        status_class = floor(response.status_code / 100)
        if status_class in responses:
            # Guard: a 1xx/3xx final status would KeyError in the original.
            responses[status_class] += 1

        if response.status_code == 200:
            return responses
|
|
|
|
def getFileNames(endpoint):
    """Build the per-endpoint CSV paths (request-rate file, resource file).

    Slashes are stripped from the endpoint so it can be embedded in a
    flat file name under data/.
    """
    slug = endpoint.replace('/', '')
    return [
        f"data/req_{FRAMEWORK_NAME}_{slug}.csv",
        f"data/resource_{FRAMEWORK_NAME}_{slug}.csv",
    ]
|
|
|
|
def record(filename, requests, reqpersec):
    """Append one `<request count>,<requests per second>` row to a CSV file.

    NOTE(review): the `requests` parameter shadows the imported `requests`
    module inside this function; harmless here but worth renaming someday.
    """
    row = f"{requests},{reqpersec}\n"
    with open(filename, "a") as out:
        out.write(row)
|
|
|
|
def record_resource(filename, requests, cpu, ram):
    """Append one `<request count>,<cpu>,<ram>` row to a resource CSV file.

    NOTE(review): `requests` shadows the imported module here too.
    """
    row = f"{requests},{cpu},{ram}\n"
    with open(filename, "a") as out:
        out.write(row)
|
|
|
|
def run_tests(endpoint, method, num_requests, metadata):
    """Load-test one endpoint across a schedule of request counts.

    For each count in `num_requests`, fires that many requests from a thread
    pool, samples container CPU/RAM mid-batch, appends throughput and resource
    rows to the endpoint's CSV files, then regenerates the graphs.

    Args:
        endpoint: path under URL_BASE to hit.
        method: 'GET' or 'POST', forwarded to send_request.
        num_requests: iterable of request counts (non-positive counts skipped).
        metadata: request body for POST endpoints, or None.
    """
    files = getFileNames(endpoint)

    # Start each endpoint's CSV files from scratch.
    for filename in files:
        if os.path.exists(filename):
            os.remove(filename)

    for num_request in num_requests:
        if num_request <= 0: continue

        ok_responses = 0
        bad_responses = 0
        server_errors = 0
        cpu, ram = 0, 0

        with concurrent.futures.ThreadPoolExecutor(max_workers=THREADS) as executor:
            url = f'{URL_BASE}{endpoint}'

            start_time = time.time()

            futures = []
            half = floor(num_request/2)
            for i in range(num_request):
                futures.append(executor.submit(send_request, url, method, metadata))

                # Sample resource usage mid-batch, when load is representative.
                if i == half:
                    cpu, ram = get_resource_usage()

            concurrent.futures.wait(futures)

            elapsed_time = time.time() - start_time

            # Aggregate the per-request status-class tallies.
            for future in futures:
                responses = future.result()
                ok_responses += responses[2]
                bad_responses += responses[4]
                server_errors += responses[5]

        # Fixed a stray `]]` at the end of the original summary line.
        print(f"{num_request}: {elapsed_time:.2f} seconds. "
              f"{elapsed_time/num_request:.4f} seconds per request. "
              f"{num_request/elapsed_time:.2f} requests per second. "
              f"[OK: {ok_responses}, Bad Request: {bad_responses}, Server Error: {server_errors}]")
        record(files[0], num_request, f"{num_request/elapsed_time:.2f}")
        record_resource(files[1], num_request, cpu, ram)

    generate_req_graph(files[0], FRAMEWORK_NAME, endpoint)
    generate_resource_graph(files[1], FRAMEWORK_NAME, endpoint)

    # Cool-down so consecutive endpoint batches don't bleed into each other.
    time.sleep(3)
|
|
|
|
def get_resource_usage():
    """Sample CPU and RAM usage of the configured docker container.

    Returns:
        (cpu, ram) usage strings from get_cpu_usage/get_ram_usage, or (0, 0)
        when no container name was given or the stats call fails.
    """
    if CONTAINER_NAME == "": return 0, 0

    try:
        client = docker.from_env()
        stats = client.containers.get(CONTAINER_NAME).stats(stream=False)
    except Exception:
        # Narrowed from a bare `except:` so Ctrl-C / SystemExit still
        # propagate; any docker/API failure degrades to zeros.
        return 0, 0  # unable to get stats

    return get_cpu_usage(stats), get_ram_usage(stats)
|
|
|
|
def get_cpu_usage(stats):
    """Compute container CPU usage from a docker stats snapshot.

    Uses the delta between the current and previous samples, scaled by the
    number of online CPUs (so values above 1.00 mean more than one core).

    Args:
        stats: docker stats dict with 'cpu_stats' and 'precpu_stats'.

    Returns:
        Usage as a string with two decimals, e.g. "0.80".
    """
    usage_delta = (stats['cpu_stats']['cpu_usage']['total_usage']
                   - stats['precpu_stats']['cpu_usage']['total_usage'])
    system_delta = (stats['cpu_stats']['system_cpu_usage']
                    - stats['precpu_stats']['system_cpu_usage'])
    online_cpus = stats['cpu_stats']['online_cpus']

    # Guard: the original divided unconditionally and raised
    # ZeroDivisionError when both samples carried the same system counter.
    if system_delta <= 0:
        return "0.00"

    percentage = (usage_delta / system_delta) * online_cpus
    return f"{percentage:.2f}"
|
|
|
|
def get_ram_usage(stats):
    """Return the container's RAM usage as a fraction-of-limit string.

    Args:
        stats: docker stats dict with a 'memory_stats' section.

    Returns:
        usage/limit formatted with two decimals, e.g. "0.25".
    """
    mem = stats['memory_stats']
    fraction = mem['usage'] / mem['limit']
    return f"{fraction:.2f}"
|
|
|
|
if __name__ == "__main__":
    # Start from an empty data/ directory. The original shelled out to
    # `rm -rf data/*` via os.system, which is non-portable and skips
    # dotfiles; recreate the directory with the stdlib instead.
    import shutil

    if os.path.exists("data"):
        shutil.rmtree("data")
    os.mkdir("data")

    for endpoint, method, num_requests, metadata in API_REQUESTS:
        print(f"# {endpoint}")
        run_tests(endpoint, method, num_requests, metadata)
|