From f99a81ad05c9ae45c68b1e6547b2e0b146225899 Mon Sep 17 00:00:00 2001
From: Hoang Viet
Date: Thu, 9 Jun 2022 14:33:04 +0700
Subject: [PATCH] Remove unused gRPC support + increase versions of some
 requirements

---
 docs/3_Concepts/1_Server.md               |   4 +-
 docs/4_Tutorials/2_Keras.md               |   2 +-
 docs/Model Deployment/mlconfig.md         |   2 +-
 docs/Model Deployment/tutorial.md         |   2 +-
 docs/commands/init.md                     |   2 +-
 docs/getstarted/core_concepts.md          |   2 +-
 mlchain/__init__.py                       |   2 +-
 mlchain/cli/main.py                       |   1 -
 mlchain/cli/mlconfig.yaml                 |   2 +-
 mlchain/cli/run.py                        |  31 +-
 mlchain/client/__init__.py                |  33 +-
 mlchain/client/grpc_client.py             |  49 ---
 mlchain/config.py                         |   2 -
 mlchain/server/__init__.py                |   6 -
 mlchain/server/grpc_server.py             |  86 -----
 mlchain/server/protos/__init__.py         |   0
 mlchain/server/protos/mlchain_pb2.py      | 379 ----------------------
 mlchain/server/protos/mlchain_pb2_grpc.py | 149 ---------
 mlchain/utils/system_info.py              |  51 ---
 requirements.txt                          |  14 +-
 setup.py                                  |   2 +-
 tests/dummy_server/mlconfig.yaml          |   2 +-
 tests/test_server.py                      |   9 -
 23 files changed, 27 insertions(+), 805 deletions(-)
 delete mode 100644 mlchain/client/grpc_client.py
 delete mode 100644 mlchain/server/grpc_server.py
 delete mode 100644 mlchain/server/protos/__init__.py
 delete mode 100644 mlchain/server/protos/mlchain_pb2.py
 delete mode 100644 mlchain/server/protos/mlchain_pb2_grpc.py
 delete mode 100644 mlchain/utils/system_info.py

diff --git a/docs/3_Concepts/1_Server.md b/docs/3_Concepts/1_Server.md
index d52678c..0f6c8aa 100644
--- a/docs/3_Concepts/1_Server.md
+++ b/docs/3_Concepts/1_Server.md
@@ -72,7 +72,7 @@ name: mlchain-server # name of service
 entry_file: server.py # python file contains object ServeModel
 host: localhost # host service
 port: 5000 # port service
-server: flask # server option flask or grpc
+server: flask # server option flask or starlette
 wrapper: gunicorn # wrapper option None or gunicorn
 gunicorn: # config gunicorn wrapper
   timeout: 60 # max time limit for the server to process
@@ -107,7 +107,7 @@
 Port to serve on.
 
 #### server: ```--server STRING```
 
-Type of server to run. Currently we support flask or grpc.
+Type of server to run. Currently we support flask or starlette.
 
 #### wrapper: ```--wrapper STRING```
diff --git a/docs/4_Tutorials/2_Keras.md b/docs/4_Tutorials/2_Keras.md
index 572db49..0aa53c5 100644
--- a/docs/4_Tutorials/2_Keras.md
+++ b/docs/4_Tutorials/2_Keras.md
@@ -275,7 +275,7 @@ name: Fashion-MNIST classifier # name of service
 entry_file: main.py # python file contains object ServeModel
 host: localhost # host service
 port: 5000 # port service
-server: flask # option flask or starlette or grpc
+server: flask # option flask or starlette
 wrapper: None # option None or gunicorn
 cors: true
 dump_request: None # None or path folder log request
diff --git a/docs/Model Deployment/mlconfig.md b/docs/Model Deployment/mlconfig.md
index 3a77607..0935ae8 100644
--- a/docs/Model Deployment/mlconfig.md
+++ b/docs/Model Deployment/mlconfig.md
@@ -22,7 +22,7 @@ name: mlchain-server # name of service
 entry_file: server.py # python file contains object ServeModel
 host: localhost # host service
 port: 5000 # port service
-server: flask # option flask or grpc
+server: flask # option flask or starlette
 wrapper: gunicorn # option None or gunicorn
 gunicorn: # config apm-server if uses gunicorn wrapper
   timeout: 60
diff --git a/docs/Model Deployment/tutorial.md b/docs/Model Deployment/tutorial.md
index 6ec7a2e..30308d7 100644
--- a/docs/Model Deployment/tutorial.md
+++ b/docs/Model Deployment/tutorial.md
@@ -168,7 +168,7 @@ name: Digit-Recognizer # name of service
 entry_file: app.py # python file contains object ServeModel
 host: localhost # host service
 port: 5000 # port
-server: flask # option flask or grpc
+server: flask # option flask or starlette
 wrapper: None # option None or gunicorn
 cors: true
 gunicorn: # config apm-server if uses gunicorn wrapper
diff --git a/docs/commands/init.md b/docs/commands/init.md
index 0aac78f..5a7f54b 100644
--- a/docs/commands/init.md
+++ b/docs/commands/init.md
@@ -28,7 +28,7 @@ name: mlchain-server # name of service
 entry_file: server.py # python file contains object ServeModel
 host: localhost # host service
 port: 2222 # port service
-server: flask # option flask or starlette or grpc
+server: flask # option flask or starlette
 trace: False # option True or False
 queue: None # option None or rabbit or redis
 wrapper: None # option None or gunicorn
diff --git a/docs/getstarted/core_concepts.md b/docs/getstarted/core_concepts.md
index 2153fa2..ba68011 100644
--- a/docs/getstarted/core_concepts.md
+++ b/docs/getstarted/core_concepts.md
@@ -15,7 +15,7 @@ real life example using our service.
 ## ML Deployment
 Simple Machine Learning model deployment is the central feature of ML Chain
 library. Our ServeModel function allows user to deploy their model without
 requiring software engineering knowledge.
-We support Flask and grpc for website hosting.
+We support Flask and starlette for website hosting.
 
 [Read More...](../Model Deployment/general.md)
diff --git a/mlchain/__init__.py b/mlchain/__init__.py
index 0a46e64..275fae3 100644
--- a/mlchain/__init__.py
+++ b/mlchain/__init__.py
@@ -7,7 +7,7 @@
 )
 
 # Parameters of MLchain
-__version__ = "0.2.8"
+__version__ = "0.2.9"
 
 HOST = "https://www.api.mlchain.ml"
 WEB_HOST = HOST
diff --git a/mlchain/cli/main.py b/mlchain/cli/main.py
index 8fc7b9c..e180117 100644
--- a/mlchain/cli/main.py
+++ b/mlchain/cli/main.py
@@ -5,7 +5,6 @@
 import click
 import flask
 import starlette
-import grpc
 from .init import init_command
 from .run import run_command
 from .artifact import artifact_command
diff --git a/mlchain/cli/mlconfig.yaml b/mlchain/cli/mlconfig.yaml
index 0a9beca..fe42806 100644
--- a/mlchain/cli/mlconfig.yaml
+++ b/mlchain/cli/mlconfig.yaml
@@ -8,7 +8,7 @@ host: 0.0.0.0 # Host of service
 port: 8001 # Port service
 
 # Server config
-server: flask # Option flask or starlette or grpc
+server: flask # Option flask or starlette
 wrapper: gunicorn # Option None or gunicorn
 cors: true # Auto enable CORS
 cors_allow_origins: # Allow origins for CORS
diff --git a/mlchain/cli/run.py b/mlchain/cli/run.py
index d2a3424..739e5d1 100644
--- a/mlchain/cli/run.py
+++ b/mlchain/cli/run.py
@@ -65,7 +65,6 @@ def get_env(_k):
 op_gunicorn = click.option("--gunicorn", "wrapper", flag_value="gunicorn", help="Run server with gunicorn or not")
 op_flask = click.option("--flask", "server", flag_value="flask", help="Run with Flask server")
 op_starlette = click.option("--starlette", "server", flag_value="starlette", help="Run with Starlette server")
-op_grpc = click.option("--grpc", "server", flag_value="grpc", help="Run with gRPC server")
 op_worker = click.option("--workers", "-w", "workers", default=None, type=int, help="Number of workers")
 op_thread = click.option("--threads", "-t", "threads", default=None, type=int, help="Number of threads")
 op_mode = click.option("--mode", "-m", "mode", default=None, type=str, help="The mode of mlconfig")
@@ -84,7 +83,6 @@ def get_env(_k):
 @op_gunicorn
 @op_flask
 @op_starlette
-@op_grpc
 @op_worker
 @op_thread
 @op_config
@@ -280,25 +278,7 @@ def run_command(
         logger.info("Ngrok url: {0}".format(endpoint))
         os.environ["NGROK_URL"] = endpoint
-
-    ############
-    # Run with grpc
-    ############
-    if server == "grpc":
-        from mlchain.server.grpc_server import GrpcServer
-
-        app = get_model(entry_file, serve_model=True)
-
-        if app is None:
-            raise Exception(
-                "Can not init model class from {0}. Please check mlconfig.yaml or {0} or mlchain run -m {{mode}}!".format(
-                    entry_file
-                )
-            )
-
-        app = GrpcServer(app, name=name)
-        app.run(host, port)
-    elif wrapper == "gunicorn":
+    if wrapper == "gunicorn":
         ############
         # Run with gunicorn
         ############
@@ -482,7 +462,7 @@ def load(self):
     elif app.__class__.__name__ == "GrpcServer":
         app.run(host, port, debug=debug)
     elif isinstance(app, ServeModel):
-        if server not in ["starlette", "grpc"]:
+        if server != "starlette":
             server = "flask"
         if server == "flask":
             from mlchain.server.flask_server import FlaskServer
@@ -532,13 +512,6 @@ def load(self):
                 debug=debug
             )
 
-        elif server == "grpc":
-            from mlchain.server.grpc_server import GrpcServer
-
-            app = GrpcServer(app, name=name)
-            app.run(host, port)
-
-
 def get_model(module, serve_model=False):
     """
     Get the serve_model from entry_file
diff --git a/mlchain/client/__init__.py b/mlchain/client/__init__.py
index 3c4e140..8b7342d 100644
--- a/mlchain/client/__init__.py
+++ b/mlchain/client/__init__.py
@@ -1,40 +1,23 @@
 from mlchain import mlconfig
 from mlchain.base import logger
-from .grpc_client import GrpcClient
 from .http_client import HttpClient
-
-class Client(HttpClient, GrpcClient):
-    def __init__(self, api_key=None, api_address=None, serializer='json', timeout=5 * 60, headers={}, type='http',
+class Client(HttpClient):
+    def __init__(self, api_key=None, api_address=None, serializer='json', timeout=5 * 60, headers={},
                  name: str = "", version: str = "", check_status=False):
-        assert isinstance(type, str), "type model must be a string"
         self._api_key = api_key
         self._api_address = api_address
         self._serializer = serializer
         self._timeout = timeout
         self._headers = headers
-        self._type = type
-        if self._type.lower() == 'http':
-            HttpClient.__init__(self, api_key=api_key, api_address=api_address, serializer=serializer,
-                                timeout=timeout, headers=headers, name=name, version=version,
-                                check_status=check_status)
-        elif self._type.lower() == 'grpc':
-            GrpcClient.__init__(self, api_key=api_key, api_address=api_address, serializer=serializer,
-                                timeout=timeout, headers=headers, name=name, version=version,
-                                check_status=check_status)
-        else:
-            raise Exception("type must be http or grpc")
+        HttpClient.__init__(self, api_key=api_key, api_address=api_address, serializer=serializer,
+                            timeout=timeout, headers=headers, name=name, version=version,
+                            check_status=check_status)
 
     def model(self, name: str = "", version: str = "", check_status=False):
-        if self._type.lower() == 'http':
-            return HttpClient(api_key=self._api_key, api_address=self._api_address, serializer=self._serializer,
-                              timeout=self._timeout, headers=self._headers, name=name, version=version,
-                              check_status=check_status)
-        if self._type.lower() == 'grpc':
-            return GrpcClient(api_key=self._api_key, api_address=self._api_address, serializer=self._serializer,
-                              timeout=self._timeout, headers=self._headers, name=name, version=version,
-                              check_status=check_status)
-
+        return HttpClient(api_key=self._api_key, api_address=self._api_address, serializer=self._serializer,
+                          timeout=self._timeout, headers=self._headers, name=name, version=version,
+                          check_status=check_status)
 
 
 def get_model(name):
     config = mlconfig.get_client_config(name)
diff --git a/mlchain/client/grpc_client.py b/mlchain/client/grpc_client.py
deleted file mode 100644
index 419a128..0000000
--- a/mlchain/client/grpc_client.py
+++ /dev/null
@@ -1,49 +0,0 @@
-import grpc
-from pathlib import Path
-from mlchain.base.log import logger
-from .base import MLClient
-from ..server.protos import mlchain_pb2_grpc, mlchain_pb2
-
-
-class GrpcClient(MLClient):
-    def __init__(self, api_key=None, api_address=None, serializer='msgpack',
-                 image_encoder=None, name=None, version='lastest',
-                 check_status=False, **kwargs):
-        MLClient.__init__(self, api_key=api_key, api_address=api_address,
-                          serializer=serializer,
-                          image_encoder=image_encoder, name=name,
-                          version=version, check_status=check_status, **kwargs)
-        self.channel = grpc.insecure_channel(api_address)
-        self.stub = mlchain_pb2_grpc.MLChainServiceStub(self.channel)
-        if check_status:
-            try:
-                ping = self.get('ping')
-                logger.info("Connect to server: {0}".format(ping))
-            except Exception as e:
-                logger.info("Can't connect to server: {0}".format(e))
-
-    def _get(self, api_name, headers=None, timeout=None):
-        """
-        GET data from url
-        """
-        pass
-
-    def _post(self, function_name, headers=None, args=None, kwargs=None):
-        if args is None:
-            args = []
-        if kwargs is None:
-            kwargs = {}
-        if headers is None:
-            headers = {}
-        args = [open(arg, 'rb').read() if isinstance(arg, Path) else arg
-                for arg in args]
-        kwargs = {k: open(arg, 'rb').read() if isinstance(arg, Path) else arg
-                  for k, arg in kwargs.items()}
-        header = mlchain_pb2.Header(serializer=self.serializer_type)
-        output = self.stub.call(mlchain_pb2.Message(header=header,
-                                                    function_name=function_name,
-                                                    args=self.serializer.encode(args),
-                                                    kwargs=self.serializer.encode(kwargs),
-                                                    headers=headers))
-
-        return output.output
diff --git a/mlchain/config.py b/mlchain/config.py
index 9d88e6c..c686019 100644
--- a/mlchain/config.py
+++ b/mlchain/config.py
@@ -6,7 +6,6 @@
 import sentry_sdk
 from sentry_sdk.integrations.flask import FlaskIntegration
 import datetime
-from mlchain.utils.system_info import get_gpu_statistics
 
 class BaseConfig(dict):
     def __init__(self, env_key='', **kwargs):
@@ -162,7 +161,6 @@ def before_send(event, hint):
     if mlconfig.MLCHAIN_SENTRY_DROP_MODULES:
         event['modules'] = {}
 
-    event['extra']["gpuinfo"] = get_gpu_statistics()
     return event
 
 def init_sentry():
diff --git a/mlchain/server/__init__.py b/mlchain/server/__init__.py
index 7e0e28c..b28458e 100644
--- a/mlchain/server/__init__.py
+++ b/mlchain/server/__init__.py
@@ -11,9 +11,3 @@
 except Exception as ex: # pragma: no cover
     import warnings
     warnings.warn("Can't import StarletteServer. {0}".format(ex))
-
-try:
-    from .grpc_server import GrpcServer
-except Exception as ex: # pragma: no cover
-    import warnings
-    warnings.warn("Can't import GrpcServer. {0}".format(ex))
diff --git a/mlchain/server/grpc_server.py b/mlchain/server/grpc_server.py
deleted file mode 100644
index 2e19705..0000000
--- a/mlchain/server/grpc_server.py
+++ /dev/null
@@ -1,86 +0,0 @@
-import time
-from threading import Thread
-from concurrent import futures
-from uuid import uuid4
-import grpc
-import mlchain
-from mlchain import mlchain_context
-from mlchain.base.exceptions import MlChainError
-from mlchain.base.log import logger
-from mlchain.base.serve_model import ServeModel
-from .base import MLServer
-from .protos import mlchain_pb2, mlchain_pb2_grpc
-
-
-class GrpcServer(mlchain_pb2_grpc.MLChainServiceServicer, MLServer):
-    """Provides methods that implement functionality of route guide server."""
-
-    def __init__(self, model: ServeModel, name=None, version='0.0'):
-        MLServer.__init__(self, model, name=name)
-        self.version = version
-
-    def get_serializer(self, serializer):
-        if serializer in self.serializers_dict:
-            return self.serializers_dict[serializer]
-        return self.serializers_dict['application/json']
-
-    def ping(self, request, context):
-        return mlchain_pb2.Byte(value=b'pong')
-
-    def call(self, request, context):
-        header = request.header
-        function_name = request.function_name
-        args = request.args
-        kwargs = request.kwargs
-        serializer = self.get_serializer(header.serializer)
-        headers = request.headers
-        uid = str(uuid4())
-        mlchain_context.set(headers)
-        mlchain_context['MLCHAIN_CONTEXT_ID'] = uid
-        args = serializer.decode(args)
-        kwargs = serializer.decode(kwargs)
-        func = self.model.get_function(function_name)
-        kwargs = self.get_kwargs(func, *args, **kwargs)
-        kwargs = self._normalize_kwargs_to_valid_format(kwargs, func)
-        try:
-            start = time.time()
-            output = self.model.call_function(function_name, None, **kwargs)
-            duration = time.time() - start
-            output = {
-                'output': output,
-                'time': duration,
-                'api_version': self.version,
-                'mlchain_version': mlchain.__version__,
-                "request_id": mlchain_context.MLCHAIN_CONTEXT_ID
-            }
-        except MlChainError as ex:
-            err = ex.msg
-            logger.error("code: {0} msg: {1}".format(ex.code, ex.msg))
-            output = {
-                'error': err,
-                'time': 0,
-                'code': ex.code,
-                'api_version': self.version,
-                'mlchain_version': mlchain.__version__,
-                "request_id": mlchain_context.MLCHAIN_CONTEXT_ID
-            }
-
-        except Exception as ex:
-            output = {
-                'output': str(ex),
-                'time': 0,
-                'api_version': self.version,
-                'mlchain_version': mlchain.__version__,
-                "request_id": mlchain_context.MLCHAIN_CONTEXT_ID
-            }
-        return mlchain_pb2.Output(output=serializer.encode(output))
-
-    def run(self, host='127.0.0.1', port=10010, workers=1, block=True):
-        server = grpc.server(futures.ThreadPoolExecutor(max_workers=workers))
-        mlchain_pb2_grpc.add_MLChainServiceServicer_to_server(self, server)
-        server.add_insecure_port('{0}:{1}'.format(host, port))
-        server.start()
-        if block:
-            server.wait_for_termination()
-        else:
-            Thread(target=server.wait_for_termination).start()
diff --git a/mlchain/server/protos/__init__.py b/mlchain/server/protos/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/mlchain/server/protos/mlchain_pb2.py b/mlchain/server/protos/mlchain_pb2.py
deleted file mode 100644
index 9eb72a2..0000000
--- a/mlchain/server/protos/mlchain_pb2.py
+++ /dev/null
@@ -1,379 +0,0 @@
-# -*- coding: utf-8 -*-
-# Generated by the protocol buffer compiler. DO NOT EDIT!
-# source: mlchain.proto
-
-from google.protobuf import descriptor as _descriptor
-from google.protobuf import message as _message
-from google.protobuf import reflection as _reflection
-from google.protobuf import symbol_database as _symbol_database
-
-# @@protoc_insertion_point(imports)
-
-_sym_db = _symbol_database.Default()
-
-from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
-
-DESCRIPTOR = _descriptor.FileDescriptor(
-  name='mlchain.proto',
-  package='',
-  syntax='proto3',
-  serialized_options=None,
-  serialized_pb=b'\n\rmlchain.proto\x1a\x1bgoogle/protobuf/empty.proto\"\x17\n\x06String\x12\r\n\x05value\x18\x01 \x01(\t\"\x15\n\x04\x42yte\x12\r\n\x05value\x18\x01 \x01(\x0c\"\xaf\x01\n\x07Message\x12\x17\n\x06header\x18\x01 \x01(\x0b\x32\x07.Header\x12\x15\n\rfunction_name\x18\x02 \x01(\t\x12\x0c\n\x04\x61rgs\x18\x03 \x01(\x0c\x12\x0e\n\x06kwargs\x18\x04 \x01(\x0c\x12&\n\x07headers\x18\x05 \x03(\x0b\x32\x15.Message.HeadersEntry\x1a.\n\x0cHeadersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"1\n\x06Header\x12\x12\n\nserializer\x18\x01 \x01(\t\x12\x13\n\x0bTraceparent\x18\x02 \x01(\x0c\"\x18\n\x06Output\x12\x0e\n\x06output\x18\x02 \x01(\x0c\x32\xb2\x02\n\x0eMLChainService\x12\x1c\n\nget_params\x12\x07.String\x1a\x05.Byte\x12\x1a\n\x08\x64\x65s_func\x12\x07.String\x1a\x05.Byte\x12%\n\x04ping\x12\x16.google.protobuf.Empty\x1a\x05.Byte\x12,\n\x0b\x64\x65scription\x12\x16.google.protobuf.Empty\x1a\x05.Byte\x12\x32\n\x11list_all_function\x12\x16.google.protobuf.Empty\x1a\x05.Byte\x12\x42\n!list_all_function_and_description\x12\x16.google.protobuf.Empty\x1a\x05.Byte\x12\x19\n\x04\x63\x61ll\x12\x08.Message\x1a\x07.Outputb\x06proto3'
-  ,
-  dependencies=[google_dot_protobuf_dot_empty__pb2.DESCRIPTOR, ])
-
-_STRING = _descriptor.Descriptor(
-  name='String',
-  full_name='String',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='value', full_name='String.value', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=b"".decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      serialized_options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  serialized_options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=46,
-  serialized_end=69,
-)
-
-_BYTE = _descriptor.Descriptor(
-  name='Byte',
-  full_name='Byte',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='value', full_name='Byte.value', index=0,
-      number=1, type=12, cpp_type=9, label=1,
-      has_default_value=False, default_value=b"",
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      serialized_options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  serialized_options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=71,
-  serialized_end=92,
-)
-
-_MESSAGE_HEADERSENTRY = _descriptor.Descriptor(
-  name='HeadersEntry',
-  full_name='Message.HeadersEntry',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='key', full_name='Message.HeadersEntry.key', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=b"".decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      serialized_options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='value', full_name='Message.HeadersEntry.value', index=1,
-      number=2, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=b"".decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      serialized_options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  serialized_options=b'8\001',
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=224,
-  serialized_end=270,
-)
-
-_MESSAGE = _descriptor.Descriptor(
-  name='Message',
-  full_name='Message',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='header', full_name='Message.header', index=0,
-      number=1, type=11, cpp_type=10, label=1,
-      has_default_value=False, default_value=None,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      serialized_options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='function_name', full_name='Message.function_name', index=1,
-      number=2, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=b"".decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      serialized_options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='args', full_name='Message.args', index=2,
-      number=3, type=12, cpp_type=9, label=1,
-      has_default_value=False, default_value=b"",
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      serialized_options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='kwargs', full_name='Message.kwargs', index=3,
-      number=4, type=12, cpp_type=9, label=1,
-      has_default_value=False, default_value=b"",
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      serialized_options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='headers', full_name='Message.headers', index=4,
-      number=5, type=11, cpp_type=10, label=3,
-      has_default_value=False, default_value=[],
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      serialized_options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[_MESSAGE_HEADERSENTRY, ],
-  enum_types=[
-  ],
-  serialized_options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=95,
-  serialized_end=270,
-)
-
-_HEADER = _descriptor.Descriptor(
-  name='Header',
-  full_name='Header',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='serializer', full_name='Header.serializer', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=b"".decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      serialized_options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='Traceparent', full_name='Header.Traceparent', index=1,
-      number=2, type=12, cpp_type=9, label=1,
-      has_default_value=False, default_value=b"",
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      serialized_options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  serialized_options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=272,
-  serialized_end=321,
-)
-
-_OUTPUT = _descriptor.Descriptor(
-  name='Output',
-  full_name='Output',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='output', full_name='Output.output', index=0,
-      number=2, type=12, cpp_type=9, label=1,
-      has_default_value=False, default_value=b"",
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      serialized_options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  serialized_options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=323,
-  serialized_end=347,
-)
-
-_MESSAGE_HEADERSENTRY.containing_type = _MESSAGE
-_MESSAGE.fields_by_name['header'].message_type = _HEADER
-_MESSAGE.fields_by_name['headers'].message_type = _MESSAGE_HEADERSENTRY
-DESCRIPTOR.message_types_by_name['String'] = _STRING
-DESCRIPTOR.message_types_by_name['Byte'] = _BYTE
-DESCRIPTOR.message_types_by_name['Message'] = _MESSAGE
-DESCRIPTOR.message_types_by_name['Header'] = _HEADER
-DESCRIPTOR.message_types_by_name['Output'] = _OUTPUT
-_sym_db.RegisterFileDescriptor(DESCRIPTOR)
-
-String = _reflection.GeneratedProtocolMessageType('String', (_message.Message,), {
-  'DESCRIPTOR': _STRING,
-  '__module__': 'mlchain_pb2'
-  # @@protoc_insertion_point(class_scope:String)
-})
-_sym_db.RegisterMessage(String)
-
-Byte = _reflection.GeneratedProtocolMessageType('Byte', (_message.Message,), {
-  'DESCRIPTOR': _BYTE,
-  '__module__': 'mlchain_pb2'
-  # @@protoc_insertion_point(class_scope:Byte)
-})
-_sym_db.RegisterMessage(Byte)
-
-Message = _reflection.GeneratedProtocolMessageType('Message', (_message.Message,), {
-
-  'HeadersEntry': _reflection.GeneratedProtocolMessageType('HeadersEntry', (_message.Message,), {
-    'DESCRIPTOR': _MESSAGE_HEADERSENTRY,
-    '__module__': 'mlchain_pb2'
-    # @@protoc_insertion_point(class_scope:Message.HeadersEntry)
-    })
-  ,
-  'DESCRIPTOR': _MESSAGE,
-  '__module__': 'mlchain_pb2'
-  # @@protoc_insertion_point(class_scope:Message)
-})
-_sym_db.RegisterMessage(Message)
-_sym_db.RegisterMessage(Message.HeadersEntry)
-
-Header = _reflection.GeneratedProtocolMessageType('Header', (_message.Message,), {
-  'DESCRIPTOR': _HEADER,
-  '__module__': 'mlchain_pb2'
-  # @@protoc_insertion_point(class_scope:Header)
-})
-_sym_db.RegisterMessage(Header)
-
-Output = _reflection.GeneratedProtocolMessageType('Output', (_message.Message,), {
-  'DESCRIPTOR': _OUTPUT,
-  '__module__': 'mlchain_pb2'
-  # @@protoc_insertion_point(class_scope:Output)
-})
-_sym_db.RegisterMessage(Output)
-
-_MESSAGE_HEADERSENTRY._options = None
-
-_MLCHAINSERVICE = _descriptor.ServiceDescriptor(
-  name='MLChainService',
-  full_name='MLChainService',
-  file=DESCRIPTOR,
-  index=0,
-  serialized_options=None,
-  serialized_start=350,
-  serialized_end=656,
-  methods=[
-  _descriptor.MethodDescriptor(
-    name='get_params',
-    full_name='MLChainService.get_params',
-    index=0,
-    containing_service=None,
-    input_type=_STRING,
-    output_type=_BYTE,
-    serialized_options=None,
-  ),
-  _descriptor.MethodDescriptor(
-    name='des_func',
-    full_name='MLChainService.des_func',
-    index=1,
-    containing_service=None,
-    input_type=_STRING,
-    output_type=_BYTE,
-    serialized_options=None,
-  ),
-  _descriptor.MethodDescriptor(
-    name='ping',
-    full_name='MLChainService.ping',
-    index=2,
-    containing_service=None,
-    input_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
-    output_type=_BYTE,
-    serialized_options=None,
-  ),
-  _descriptor.MethodDescriptor(
-    name='description',
-    full_name='MLChainService.description',
-    index=3,
-    containing_service=None,
-    input_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
-    output_type=_BYTE,
-    serialized_options=None,
-  ),
-  _descriptor.MethodDescriptor(
-    name='list_all_function',
-    full_name='MLChainService.list_all_function',
-    index=4,
-    containing_service=None,
-    input_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
-    output_type=_BYTE,
-    serialized_options=None,
-  ),
-  _descriptor.MethodDescriptor(
-    name='list_all_function_and_description',
-    full_name='MLChainService.list_all_function_and_description',
-    index=5,
-    containing_service=None,
-    input_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
-    output_type=_BYTE,
-    serialized_options=None,
-  ),
-  _descriptor.MethodDescriptor(
-    name='call',
-    full_name='MLChainService.call',
-    index=6,
-    containing_service=None,
-    input_type=_MESSAGE,
-    output_type=_OUTPUT,
-    serialized_options=None,
-  ),
-])
-_sym_db.RegisterServiceDescriptor(_MLCHAINSERVICE)
-
-DESCRIPTOR.services_by_name['MLChainService'] = _MLCHAINSERVICE
-
-# @@protoc_insertion_point(module_scope)
diff --git a/mlchain/server/protos/mlchain_pb2_grpc.py b/mlchain/server/protos/mlchain_pb2_grpc.py
deleted file mode 100644
index e9d1298..0000000
--- a/mlchain/server/protos/mlchain_pb2_grpc.py
+++ /dev/null
@@ -1,149 +0,0 @@
-# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
-import grpc
-
-from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
-from . import mlchain_pb2 as mlchain__pb2
-
-
-class MLChainServiceStub:
-    """Service definition
-    """
-
-    def __init__(self, channel):
-        """Constructor.
-
-        Args:
-            channel: A grpc.Channel.
-        """
-        self.get_params = channel.unary_unary(
-            '/MLChainService/get_params',
-            request_serializer=mlchain__pb2.String.SerializeToString,
-            response_deserializer=mlchain__pb2.Byte.FromString,
-        )
-        self.des_func = channel.unary_unary(
-            '/MLChainService/des_func',
-            request_serializer=mlchain__pb2.String.SerializeToString,
-            response_deserializer=mlchain__pb2.Byte.FromString,
-        )
-        self.ping = channel.unary_unary(
-            '/MLChainService/ping',
-            request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
-            response_deserializer=mlchain__pb2.Byte.FromString,
-        )
-        self.description = channel.unary_unary(
-            '/MLChainService/description',
-            request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
-            response_deserializer=mlchain__pb2.Byte.FromString,
-        )
-        self.list_all_function = channel.unary_unary(
-            '/MLChainService/list_all_function',
-            request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
-            response_deserializer=mlchain__pb2.Byte.FromString,
-        )
-        self.list_all_function_and_description = channel.unary_unary(
-            '/MLChainService/list_all_function_and_description',
-            request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
-            response_deserializer=mlchain__pb2.Byte.FromString,
-        )
-        self.call = channel.unary_unary(
-            '/MLChainService/call',
-            request_serializer=mlchain__pb2.Message.SerializeToString,
-            response_deserializer=mlchain__pb2.Output.FromString,
-        )
-
-
-class MLChainServiceServicer(object):
-    """Service definition
-    """
-
-    def get_params(self, request, context):
-        # missing associated documentation comment in .proto file
-        pass
-        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-        context.set_details('Method not implemented!')
-        raise NotImplementedError('Method not implemented!')
-
-    def des_func(self, request, context):
-        # missing associated documentation comment in .proto file
-        pass
-        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-        context.set_details('Method not implemented!')
-        raise NotImplementedError('Method not implemented!')
-
-    def ping(self, request, context):
-        # missing associated documentation comment in .proto file
-        pass
-        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-        context.set_details('Method not implemented!')
-        raise NotImplementedError('Method not implemented!')
-
-    def description(self, request, context):
-        # missing associated documentation comment in .proto file
-        pass
-        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-        context.set_details('Method not implemented!')
-        raise NotImplementedError('Method not implemented!')
-
-    def list_all_function(self, request, context):
-        # missing associated documentation comment in .proto file
-        pass
-        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-        context.set_details('Method not implemented!')
-        raise NotImplementedError('Method not implemented!')
-
-    def list_all_function_and_description(self, request, context):
-        # missing associated documentation comment in .proto file
-        pass
-        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-        context.set_details('Method not implemented!')
-        raise NotImplementedError('Method not implemented!')
-
-    def call(self, request, context):
-        # missing associated documentation comment in .proto file
-        pass
-        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-        context.set_details('Method not implemented!')
-        raise NotImplementedError('Method not implemented!')
-
-
-def add_MLChainServiceServicer_to_server(servicer, server):
-    rpc_method_handlers = {
-        'get_params': grpc.unary_unary_rpc_method_handler(
-            servicer.get_params,
-            request_deserializer=mlchain__pb2.String.FromString,
-            response_serializer=mlchain__pb2.Byte.SerializeToString,
-        ),
-        'des_func': grpc.unary_unary_rpc_method_handler(
-            servicer.des_func,
-            request_deserializer=mlchain__pb2.String.FromString,
-            response_serializer=mlchain__pb2.Byte.SerializeToString,
-        ),
-        'ping': grpc.unary_unary_rpc_method_handler(
-            servicer.ping,
-            request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
-            response_serializer=mlchain__pb2.Byte.SerializeToString,
-        ),
-        'description': grpc.unary_unary_rpc_method_handler(
-            servicer.description,
-            request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
-            response_serializer=mlchain__pb2.Byte.SerializeToString,
-        ),
-        'list_all_function': grpc.unary_unary_rpc_method_handler(
-            servicer.list_all_function,
-            request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
-            response_serializer=mlchain__pb2.Byte.SerializeToString,
-        ),
-        'list_all_function_and_description': grpc.unary_unary_rpc_method_handler(
-            servicer.list_all_function_and_description,
-            request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
-            response_serializer=mlchain__pb2.Byte.SerializeToString,
-        ),
-        'call': grpc.unary_unary_rpc_method_handler(
-            servicer.call,
-            request_deserializer=mlchain__pb2.Message.FromString,
-            response_serializer=mlchain__pb2.Output.SerializeToString,
-        ),
-    }
-    generic_handler = grpc.method_handlers_generic_handler(
-        'MLChainService', rpc_method_handlers)
-    server.add_generic_rpc_handlers((generic_handler,))
diff --git a/mlchain/utils/system_info.py b/mlchain/utils/system_info.py
deleted file mode 100644
index be433d4..0000000
--- a/mlchain/utils/system_info.py
+++ /dev/null
@@ -1,51 +0,0 @@
-"""
-The code is referenced from https://github.com/jacenkow/gpu-sentry/blob/master/gpu_sentry/client.py
-"""
-from pynvml import (
-    NVMLError,
-    nvmlDeviceGetCount,
-    nvmlDeviceGetHandleByIndex,
-    nvmlDeviceGetMemoryInfo,
-    nvmlDeviceGetName,
-    nvmlInit,
-)
-from mlchain.base.log import logger
-
-def _convert_kb_to_gb(size):
-    """Convert given size in kB to GB with 2-decimal places rounding."""
-    return round(size / 1024 ** 3, 2)
-
-class GPUStats:
-    def __init__(self):
-        try:
-            nvmlInit()
-            self.has_gpu = True
-        except Exception as error:
-            logger.debug(f"Cannot get GPU info: {error}")
-            self.has_gpu = False
-        if self.has_gpu:
-            self.gpu_count = nvmlDeviceGetCount()
-
-    def get_gpu_statistics(self):
-        """Get statistics for each GPU installed in the system."""
-        if not self.has_gpu:
-            return []
-        statistics = []
-        for i in range(self.gpu_count):
-            handle = nvmlDeviceGetHandleByIndex(i)
-            memory = nvmlDeviceGetMemoryInfo(handle)
-            statistics.append({
-                "gpu": i,
-                "name": nvmlDeviceGetName(handle).decode("utf-8"),
-                "memory": {
-                    "total": _convert_kb_to_gb(int(memory.total)),
-                    "used": _convert_kb_to_gb(int(memory.used)),
-                    "utilisation": int(memory.used / memory.total * 100)
-                },
-            })
-        return statistics
-
-gpu_stats = GPUStats()
-
-def get_gpu_statistics():
-    return gpu_stats.get_gpu_statistics()
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
index 40e7ef3..2ca9abb 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,30 +1,28 @@
 attrs>=20.3.0
 blosc==1.10.6; sys_platform != 'win32'
 h11==0.12.0
-Flask==2.1.0
+Flask==2.1.2
 Flask-Cors==3.0.10
 gunicorn==20.1.0
 gevent==21.12.0
-msgpack==1.0.3
+msgpack==1.0.4
 numpy<1.20; python_version == '3.6'
 numpy<=1.20.3; python_version >= '3.7'
 opencv-python>=4.5.1
 Pillow>=8.0.1
-starlette[full]==0.19.0
+starlette[full]==0.20.2
 requests>=2.25.1
 six>=1.13.0
 toml>=0.10.0
-trio==0.20.0
+trio==0.21.0
 urllib3>=1.26.2
 uvicorn[standard]==0.17.6
 uvloop==0.14.0; sys_platform != 'win32' and python_version == '3.6'
 uvloop>=0.16.0; sys_platform != 'win32' and python_version >= '3.7'
-httpx==0.22.0
-grpcio
-protobuf>=3.10.0
+httpx==0.23.0
 boto3>=1.16.43
 pyyaml>=5.3.1
-sentry-sdk[flask]>=1.5.8
+sentry-sdk[flask]>=1.5.12
 thefuzz
 GPUtil>=1.4.0
 tqdm
diff --git a/setup.py b/setup.py
index e2e6d04..4c976f7 100644
--- a/setup.py
+++ b/setup.py
@@ -5,7 +5,7 @@
 import os
 from setuptools import setup, find_packages
 
-__version__ = "0.2.8"
+__version__ = "0.2.9"
 
 project = "mlchain"
 
diff --git a/tests/dummy_server/mlconfig.yaml b/tests/dummy_server/mlconfig.yaml
index 6c2c821..6591576 100644
--- a/tests/dummy_server/mlconfig.yaml
+++ b/tests/dummy_server/mlconfig.yaml
@@ -3,7 +3,7 @@ version: '0.0.1'
 entry_file: server.py # python file contains object ServeModel
 host: localhost # host service
 port: 12345 # port service
-server: flask # option flask or starlette or grpc
+server: flask # option flask or starlette
 wrapper: gunicorn
 cors: true
 gunicorn: # config apm-server if uses gunicorn wrapper
diff --git a/tests/test_server.py b/tests/test_server.py
index 77297ec..04677f6 100644
--- a/tests/test_server.py
+++ b/tests/test_server.py
@@ -13,7 +13,6 @@
 import numpy as np
 from mlchain.base import ServeModel
 from mlchain.server.flask_server import FlaskServer
-from mlchain.server.grpc_server import GrpcServer
 from mlchain.server.starlette_server import StarletteServer
 from mlchain.decorators import except_serving
 from mlchain.base.serve_model import batch,non_thread
@@ -83,13 +82,5 @@ def test_starlette_server_init(self):
             from .utils import test_breaking_process_server
             test_breaking_process_server(starlette_model, port=10002)
 
-    def test_grpc_server_init(self):
-        logger.info("Running grpc server init test")
-        model = ServeModel(original_model)
-        grpc_model = GrpcServer(model)
-        # if self.is_not_windows:
-        #     test_breaking_process_server(grpc_model, port=10003)
-
-
 if __name__ == "__main__":
     unittest.main()
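After this change, Client is a plain wrapper around HttpClient and the old type='http'/'grpc' switch is gone from the public surface. A minimal sketch of the remaining call path, assuming a service already running on localhost:5000 that exposes a hypothetical predict function, and assuming the client's usual attribute proxy for invoking remote functions:

    from mlchain.client import Client

    # The 'type' argument no longer exists: every Client is an HttpClient underneath.
    client = Client(api_address="http://localhost:5000", serializer="json")

    # model() always returns an HttpClient bound to the served model.
    model = client.model(check_status=False)

    # 'predict' is a hypothetical function exposed by the remote ServeModel;
    # the call is forwarded over HTTP and the response decoded by the serializer.
    result = model.predict("some input")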
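On the server side, run.py now falls back to Flask for anything that is not explicitly starlette (if server != "starlette": server = "flask"), so a leftover server: grpc in an old mlconfig.yaml resolves to Flask instead of raising. A sketch of the two remaining programmatic entry points, with a hypothetical model class; the run(host, port) call mirrors the usage visible elsewhere in this patch:

    from mlchain.base import ServeModel
    from mlchain.server.flask_server import FlaskServer
    from mlchain.server.starlette_server import StarletteServer

    class Calculator:
        """Hypothetical model: any plain class can be wrapped by ServeModel."""
        def add(self, a: int, b: int) -> int:
            return a + b

    serve_model = ServeModel(Calculator())

    # 'flask' (the default) and 'starlette' are the only remaining server options.
    FlaskServer(serve_model).run(host="localhost", port=5000)
    # StarletteServer(serve_model).run(host="localhost", port=5000)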