# tcc/scripts/testes.py
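
"""Load tests for the framework containers.

For each framework in FRAMEWORKS, sends batches of concurrent requests to its
endpoints and appends throughput and container CPU/RAM samples to CSV files
under data/.
"""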

import requests
import docker
import concurrent.futures
import time
import sys
import os
from math import floor
from init import init
from common import FRAMEWORKS, ENDPOINTS, API_REQUESTS
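# Assumed shapes of the imported data (inferred from usage below, not verified
# against common.py):
#   FRAMEWORKS   -> list of (framework_name, container_name) tuples
#   ENDPOINTS    -> dict mapping framework_name to its base URL
#   API_REQUESTS -> list of (endpoint, method, num_requests, metadata) tuples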
init()
THREADS = 10
FRAMEWORK_NAME = ""
CONTAINER_NAME = ""
URL_BASE = 'http://localhost:9090'

def send_request(url, method='GET', payload=None):
    """Send a single request, retrying until it returns 200; count responses by status class."""
    success = False
    responses = {
        2: 0,  # 2xx: OK
        4: 0,  # 4xx: client error
        5: 0,  # 5xx: server error
    }
    while not success:
        try:
            if method == 'GET':
                response = requests.get(url)
            elif method == 'POST':
                response = requests.post(url, data=payload, headers={'Content-Type': 'image/png'})
            else:
                raise ValueError(f"Unsupported method: {method}")
        except requests.RequestException:
            continue  # connection failed, retry
        success = response.status_code == 200
        status_class = floor(response.status_code / 100)
        if status_class in responses:
            responses[status_class] += 1
    return responses

def getFileNames(endpoint):
    """Return the throughput and resource CSV paths for the current framework/endpoint."""
    endpoint = endpoint.replace('/', '')
    files = [
        f"data/req_{FRAMEWORK_NAME}_{endpoint}.csv",
        f"data/resource_{FRAMEWORK_NAME}_{endpoint}.csv",
    ]
    return files

def record(filename, num_requests, reqpersec):
    with open(filename, "a") as file:
        file.write(f"{num_requests},{reqpersec}\n")


def record_resource(filename, num_requests, cpu, ram):
    with open(filename, "a") as file:
        file.write(f"{num_requests},{cpu},{ram}\n")

def run_tests(endpoint, method, num_requests, metadata):
    files = getFileNames(endpoint)
    # Start from fresh CSV files for this endpoint.
    for filename in files:
        if os.path.exists(filename):
            os.remove(filename)
    for num_request in num_requests:
        if num_request <= 0:
            continue
        ok_responses = 0
        bad_responses = 0
        server_errors = 0
        cpu, ram = 0, 0
        with concurrent.futures.ThreadPoolExecutor(max_workers=THREADS) as executor:
            url = f'{URL_BASE}{endpoint}'
            start_time = time.time()
            futures = []
            # with requests.Session() as session:
            #     futures = [executor.submit(send_request, session, url) for _ in range(num_request)]
            half = floor(num_request / 2)
            for i in range(num_request):
                futures.append(executor.submit(send_request, url, method, metadata))
                if i == half:
                    # Sample container CPU/RAM roughly halfway through the batch.
                    cpu, ram = get_resource_usage()
            concurrent.futures.wait(futures)
            elapsed_time = time.time() - start_time
            for future in futures:
                responses = future.result()
                ok_responses += responses[2]
                bad_responses += responses[4]
                server_errors += responses[5]
        print(f"{num_request}: {elapsed_time:.2f} seconds. "
              f"{elapsed_time / num_request:.4f} seconds per request. "
              f"{num_request / elapsed_time:.2f} requests per second. "
              f"[OK: {ok_responses}, Bad Request: {bad_responses}, Server Error: {server_errors}]")
        record(files[0], num_request, f"{num_request / elapsed_time:.2f}")
        record_resource(files[1], num_request, cpu, ram)
        time.sleep(3)
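
# The helpers below read one snapshot from the Docker stats API for the
# container under test. CPU is (container delta / system delta) * online CPUs,
# i.e. roughly the number of cores in use; RAM is usage/limit as a fraction.
# Both are returned pre-formatted as strings for the CSV output.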

def get_resource_usage():
    if CONTAINER_NAME == "":
        return 0, 0
    try:
        client = docker.from_env()
        stats = client.containers.get(CONTAINER_NAME).stats(stream=False)
    except docker.errors.DockerException:
        return 0, 0  # unable to get stats
    return get_cpu_usage(stats), get_ram_usage(stats)

def get_cpu_usage(stats):
    usage_delta = stats['cpu_stats']['cpu_usage']['total_usage'] - stats['precpu_stats']['cpu_usage']['total_usage']
    system_delta = stats['cpu_stats']['system_cpu_usage'] - stats['precpu_stats']['system_cpu_usage']
    online_cpus = stats['cpu_stats']['online_cpus']
    usage = (usage_delta / system_delta) * online_cpus
    return f"{usage:.2f}"

def get_ram_usage(stats):
    usage = stats['memory_stats']['usage']
    limit = stats['memory_stats']['limit']
    return f"{usage / limit:.2f}"  # fraction of the container memory limit in use

if __name__ == "__main__":
    if not os.path.exists("data"):
        os.mkdir("data")
    for i in range(len(FRAMEWORKS)):
        FRAMEWORK_NAME = FRAMEWORKS[i][0]
        CONTAINER_NAME = FRAMEWORKS[i][1]
        URL_BASE = ENDPOINTS[FRAMEWORK_NAME]
        for endpoint, method, num_requests, metadata in API_REQUESTS:
            print(f"# {FRAMEWORK_NAME} - {endpoint}")
            run_tests(endpoint, method, num_requests, metadata)