From 111cfa97f1ebf07a598386e0fba915695f65d30b Mon Sep 17 00:00:00 2001
From: Ettore Di Giacinto
Date: Wed, 20 Dec 2023 00:33:24 +0000
Subject: [PATCH] test only model load on petals

---
 backend/python/vllm/test_backend_vllm.py | 19 -------------------
 1 file changed, 19 deletions(-)

diff --git a/backend/python/vllm/test_backend_vllm.py b/backend/python/vllm/test_backend_vllm.py
index 7760f816..1cbdcd0a 100644
--- a/backend/python/vllm/test_backend_vllm.py
+++ b/backend/python/vllm/test_backend_vllm.py
@@ -55,22 +55,3 @@ class TestBackendServicer(unittest.TestCase):
             self.fail("LoadModel service failed")
         finally:
             self.tearDown()
-
-    def test_text(self):
-        """
-        This method tests if the embeddings are generated successfully
-        """
-        try:
-            self.setUp()
-            with grpc.insecure_channel("localhost:50051") as channel:
-                stub = backend_pb2_grpc.BackendStub(channel)
-                response = stub.LoadModel(backend_pb2.ModelOptions(Model="facebook/opt-125m"))
-                self.assertTrue(response.success)
-                req = backend_pb2.PredictOptions(Prompt="The capital of France is")
-                resp = stub.Predict(req)
-                self.assertIsNotNone(resp.message)
-        except Exception as err:
-            print(err)
-            self.fail("text service failed")
-        finally:
-            self.tearDown()
\ No newline at end of file