mirror of
https://github.com/ggerganov/llama.cpp.git
synced 2024-12-25 02:44:36 +00:00
server bench: fix bench not waiting for model load (#7284)
This commit is contained in:
parent
9f773486ab
commit
583fd6b000
@ -293,13 +293,14 @@ def start_server_background(args):
|
||||
|
||||
|
||||
def is_server_listening(server_fqdn, server_port):
    """Return True if the server reports healthy on its /health endpoint.

    A bare TCP connect is not sufficient readiness: the server accepts
    connections before the model has finished loading, so we require an
    HTTP 200 from /health instead.

    :param server_fqdn: host name or address of the server (may already
                        carry an ``http://`` prefix)
    :param server_port: TCP port the server listens on
    :return: True when GET /health answers 200, False on any failure
    """
    try:
        url = f"{server_fqdn}:{server_port}/health"
        if not url.startswith("http://"):
            url = f"http://{url}"
        # Bound the request so a stalled or half-started server cannot
        # hang the benchmark; any timeout falls through to the except
        # branch and reads as "not ready yet".
        result = requests.get(url, timeout=5)
        return result.status_code == 200
    except Exception:
        # Connection refused, DNS failure, timeout, reset — all of these
        # simply mean the server is not (yet) serving; the caller polls.
        return False
|
||||
def escape_metric_name(metric_name):
    """Normalize *metric_name* into an upper-case identifier.

    The name is upper-cased, then every character outside A-Z/0-9 is
    replaced with an underscore (Prometheus-style metric naming).
    """
    upper_name = metric_name.upper()
    return re.sub(r'[^A-Z0-9]', '_', upper_name)
|
||||
|
Loading…
Reference in New Issue
Block a user