mirror of https://github.com/ivanch/tcc.git
139 lines
4.3 KiB
Python
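"""Load-test driver: sends batches of concurrent HTTP requests to each configured
endpoint, samples the target Docker container's CPU/RAM usage, and records
throughput and resource data to CSV files plotted by the graph module."""
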
import concurrent.futures
import os
import sys
import time
from math import floor

import docker
import requests

from graph import generate_req_graph, generate_resource_graph

# Require exactly two arguments: the framework label and the Docker container to monitor.
if len(sys.argv) != 3 or sys.argv[1] in ('-h', '--help'):
    print("Usage: python testes.py <framework name> <container name>")
    sys.exit(1)

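# Test configuration: worker-thread count, base URL of the service under test,
# and the endpoints to exercise, each paired with a sweep of request counts.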
THREADS = 10
FRAMEWORK_NAME = sys.argv[1]
CONTAINER_NAME = sys.argv[2]
URL_BASE = 'http://localhost:9080'
API_REQUESTS = [
    ('/status/ok', range(0, 30_000, 5_000)),
    ('/image/load-image', range(0, 30_000, 5_000)),
    ('/static/simpleimage.png', range(0, 30_000, 5_000)),
    ('/image/load-big-image', range(0, 1_000, 200)),
    ('/static/bigimage.png', range(0, 1_000, 200)),
    ('/static/video.mp4', range(0, 10_000, 1_000)),
]

def send_request(url):
    """Issue GET requests against `url` until one succeeds, tallying responses by status class."""
    success = False
    responses = {
        2: 0,  # 2xx: OK
        4: 0,  # 4xx: client error
        5: 0,  # 5xx: server error
    }
    while not success:
        try:
            response = requests.get(url)
        except requests.RequestException:
            # Connection failures are retried until the server answers.
            continue
        success = response.status_code == 200
        status_class = response.status_code // 100
        responses[status_class] = responses.get(status_class, 0) + 1
    return responses

def get_file_names(endpoint):
    """Return the per-endpoint CSV file names for throughput and resource data."""
    endpoint = endpoint.replace('/', '')

    files = [
        f"req_{FRAMEWORK_NAME}_{endpoint}.csv",
        f"resource_{FRAMEWORK_NAME}_{endpoint}.csv",
    ]

    return files

def record(filename, num_requests, reqpersec):
    """Append one '<requests>,<requests per second>' row to the throughput CSV."""
    with open(filename, "a") as file:
        file.write(f"{num_requests},{reqpersec}\n")

def record_resource(filename, num_requests, cpu, ram):
    """Append one '<requests>,<cpu>,<ram>' row to the resource-usage CSV."""
    with open(filename, "a") as file:
        file.write(f"{num_requests},{cpu},{ram}\n")

def run_tests(endpoint, num_requests):
    """Sweep the endpoint with increasing request counts, recording throughput and resource usage."""
    files = get_file_names(endpoint)
    # Start each sweep with fresh CSV files.
    for filename in files:
        if os.path.exists(filename):
            os.remove(filename)

    for num_request in num_requests:
        if num_request <= 0:
            continue

        ok_responses = 0
        bad_responses = 0
        server_errors = 0
        cpu, ram = 0, 0

        with concurrent.futures.ThreadPoolExecutor(max_workers=THREADS) as executor:
            url = f'{URL_BASE}{endpoint}'

            start_time = time.time()

            futures = []
            # Sample container resource usage halfway through the batch.
            half = floor(num_request / 2)
            for i in range(num_request):
                futures.append(executor.submit(send_request, url))

                if i == half:
                    cpu, ram = get_resource_usage()

            concurrent.futures.wait(futures)

            elapsed_time = time.time() - start_time

            # Aggregate per-request tallies by status class.
            for future in futures:
                responses = future.result()
                ok_responses += responses[2]
                bad_responses += responses[4]
                server_errors += responses[5]

            print(f"{num_request}: {elapsed_time:.2f} seconds. "
                  f"{elapsed_time/num_request:.4f} seconds per request. "
                  f"{num_request/elapsed_time:.2f} requests per second. "
                  f"[OK: {ok_responses}, Bad Request: {bad_responses}, Server Error: {server_errors}]")
            record(files[0], num_request, f"{num_request/elapsed_time:.2f}")
            record_resource(files[1], num_request, cpu, ram)

        generate_req_graph(files[0], FRAMEWORK_NAME, endpoint)
        generate_resource_graph(files[1], FRAMEWORK_NAME, endpoint)

        # Let the service settle before the next batch.
        time.sleep(3)

def get_resource_usage():
    """Return (cpu, ram) usage strings for the monitored container, or (0, 0) if stats cannot be read."""
    try:
        client = docker.from_env()
        stats = client.containers.get(CONTAINER_NAME).stats(stream=False)
    except Exception:
        return 0, 0  # unable to get stats (Docker not reachable or container not found)

    return get_cpu_usage(stats), get_ram_usage(stats)

def get_cpu_usage(stats):
    """CPU usage from a Docker stats sample (Docker's percent formula, as a fraction of 1 rather than 100)."""
    usage_delta = stats['cpu_stats']['cpu_usage']['total_usage'] - stats['precpu_stats']['cpu_usage']['total_usage']
    system_delta = stats['cpu_stats']['system_cpu_usage'] - stats['precpu_stats']['system_cpu_usage']
    online_cpus = stats['cpu_stats']['online_cpus']
    percentage = (usage_delta / system_delta) * online_cpus
    return f"{percentage:.2f}"

def get_ram_usage(stats):
    """Memory usage from a Docker stats sample, as a fraction of the container's memory limit."""
    usage = stats['memory_stats']['usage']
    limit = stats['memory_stats']['limit']

    percentage = usage / limit

    return f"{percentage:.2f}"


# Run the full sweep for every configured endpoint.
for endpoint, num_requests in API_REQUESTS:
    print(f"# {endpoint}")
    run_tests(endpoint, num_requests)
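# Output: two CSVs are appended per endpoint and then plotted via the graph module:
#   req_<framework>_<endpoint>.csv      -> "<num_requests>,<requests_per_second>"
#   resource_<framework>_<endpoint>.csv -> "<num_requests>,<cpu>,<ram>"
#
# Example invocation (placeholder names; assumes the service is already listening
# on localhost:9080 inside the named container):
#   python testes.py flask flask-api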