diff --git a/server/clip_server/model/clip_model.py b/server/clip_server/model/clip_model.py
index fc8a9dd01..7164cfea8 100644
--- a/server/clip_server/model/clip_model.py
+++ b/server/clip_server/model/clip_model.py
@@ -35,7 +35,18 @@ def __new__(cls, name: str, **kwargs):
 
                 instance = super().__new__(MultilingualCLIPModel)
             else:
-                raise ValueError(f'The CLIP model name=`{name}` is not supported.')
+                raise ValueError(
+                    'CLIP model {} not found; below is a list of all available models:\n{}'.format(
+                        name,
+                        ''.join(
+                            [
+                                '\t- {}\n'.format(i)
+                                for i in list(_OPENCLIP_MODELS.keys())
+                                + list(_MULTILINGUALCLIP_MODELS.keys())
+                            ]
+                        ),
+                    )
+                )
         else:
             instance = super().__new__(cls)
         return instance
diff --git a/server/clip_server/model/clip_onnx.py b/server/clip_server/model/clip_onnx.py
index 980e71b52..cf82f9100 100644
--- a/server/clip_server/model/clip_onnx.py
+++ b/server/clip_server/model/clip_onnx.py
@@ -193,7 +193,10 @@ def __init__(self, name: str, model_path: str = None):
                 )
         else:
             raise RuntimeError(
-                f'Model {name} not found; available models = {list(_MODELS.keys())}'
+                'CLIP model {} not found or does not support the ONNX backend; below is a list of all available models:\n{}'.format(
+                    name,
+                    ''.join(['\t- {}\n'.format(i) for i in list(_MODELS.keys())]),
+                )
             )
 
     @staticmethod
diff --git a/server/clip_server/model/clip_trt.py b/server/clip_server/model/clip_trt.py
index b43a20c5b..1510003c5 100644
--- a/server/clip_server/model/clip_trt.py
+++ b/server/clip_server/model/clip_trt.py
@@ -114,7 +114,10 @@ def __init__(
                 save_engine(text_engine, self._textual_path)
         else:
             raise RuntimeError(
-                f'Model {name} not found or not supports Nvidia TensorRT backend; available models = {list(_MODELS.keys())}'
+                'CLIP model {} not found or does not support the Nvidia TensorRT backend; below is a list of all available models:\n{}'.format(
+                    name,
+                    ''.join(['\t- {}\n'.format(i) for i in list(_MODELS.keys())]),
+                )
             )
 
     @staticmethod
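
All three backends now render the same kind of indented bullet list of model names in their error messages. As a minimal sketch only (the `format_available_models` helper below is hypothetical and not part of this patch), the shared formatting could be factored into one function so the three near-identical list comprehensions don't drift apart:

```python
from typing import Iterable


def format_available_models(names: Iterable[str]) -> str:
    """Render model names as an indented bullet list, one name per line."""
    return ''.join('\t- {}\n'.format(name) for name in names)


# Hypothetical usage mirroring the ONNX branch of this patch:
#
#   raise RuntimeError(
#       'CLIP model {} not found or does not support the ONNX backend; '
#       'below is a list of all available models:\n{}'.format(
#           name, format_available_models(_MODELS.keys())
#       )
#   )
```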