[Bugfix] Add image placeholder for OpenAI Compatible Server of MiniCPM-V (#6787)
Co-authored-by: hezhihui <hzh7269@modelbest.cn>
Co-authored-by: Cyrus Leung <cyrus.tl.leung@gmail.com>
parent 316a41ac1d
commit b75e314fff
@@ -4,6 +4,8 @@ from vllm import LLM, SamplingParams
 from vllm.assets.image import ImageAsset
 
 # 2.0
+# The official repo doesn't work yet, so we need to use a fork for now
+# For more details, please see: https://github.com/vllm-project/vllm/pull/4087#issuecomment-2250397630
 # MODEL_NAME = "HwwwH/MiniCPM-V-2"
 # 2.5
 MODEL_NAME = "openbmb/MiniCPM-Llama3-V-2_5"
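For context, the example file this hunk touches drives MiniCPM-V through vLLM's offline API, with the model's image placeholder embedded directly in the user message. A minimal sketch of that flow, assuming the bundled "stop_sign" image asset and an illustrative question (neither comes from this diff):

```python
# A minimal sketch of the offline MiniCPM-V flow; the question text and
# sampling settings are illustrative assumptions, not part of this commit.
from transformers import AutoTokenizer

from vllm import LLM, SamplingParams
from vllm.assets.image import ImageAsset

MODEL_NAME = "openbmb/MiniCPM-Llama3-V-2_5"

image = ImageAsset("stop_sign").pil_image  # bundled vLLM test asset
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, trust_remote_code=True)
llm = LLM(model=MODEL_NAME, trust_remote_code=True)

# MiniCPM-V expects its image placeholder inline with the user text.
messages = [{
    "role": "user",
    "content": "(<image>./</image>)\nWhat is the content of this image?",
}]
prompt = tokenizer.apply_chat_template(messages,
                                       tokenize=False,
                                       add_generation_prompt=True)

outputs = llm.generate(
    {"prompt": prompt, "multi_modal_data": {"image": image}},
    SamplingParams(temperature=0.0, max_tokens=64),
)
print(outputs[0].outputs[0].text)
```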
@@ -100,7 +100,9 @@ def _image_token_str(model_config: ModelConfig,
     if model_type == "phi3_v":
         # Workaround since this token is not defined in the tokenizer
         return "<|image_1|>"
-    if model_type in ("blip-2", "chatglm", "fuyu", "minicpmv", "paligemma"):
+    if model_type == "minicpmv":
+        return "(<image>./</image>)"
+    if model_type in ("blip-2", "chatglm", "fuyu", "paligemma"):
         # These models do not use image tokens in the prompt
         return None
     if model_type.startswith("llava"):
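Previously, minicpmv fell into the branch that returns None, so the OpenAI-compatible server inserted no image placeholder into the prompt at all. With the new branch, the server adds "(<image>./</image>)" itself and the client just sends a standard multimodal chat request. A minimal client sketch, assuming a server already running locally on port 8000; the base URL, API key, and image URL are illustrative placeholders:

```python
# Sketch of a client request against the OpenAI-compatible server.
# Server address, API key, and image URL are assumptions for illustration.
from openai import OpenAI

client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")

response = client.chat.completions.create(
    model="openbmb/MiniCPM-Llama3-V-2_5",
    messages=[{
        "role": "user",
        "content": [
            {"type": "text", "text": "What is the content of this image?"},
            # After this fix, the server prepends "(<image>./</image>)" to
            # the text for minicpmv models; the client sends no placeholder.
            {"type": "image_url",
             "image_url": {"url": "https://example.com/stop_sign.jpg"}},
        ],
    }],
)
print(response.choices[0].message.content)
```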