Version: 0.8.x

Offline Inference with Vision Language Models

Source code: vllm-project/vllm

"""
This example shows how to use vLLM for running offline inference
with the correct prompt format on vision language models.

For most models, the prompt format should follow corresponding examples
on HuggingFace model repository.
这个示例展示了如何使用 vLLM 进行离线推理,并在视觉语言模型上使用正确的提示格式。
对于大多数模型,提示格式应遵循 HuggingFace 模型库中的相应示例。

"""
from transformers import AutoTokenizer

from vllm import LLM, SamplingParams
from vllm.assets.image import ImageAsset
from vllm.utils import FlexibleArgumentParser

# Input image and question
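# ImageAsset loads a small sample image that vLLM provides as an example
# asset; any RGB PIL image could be substituted for `image` below.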
image = ImageAsset("cherry_blossom").pil_image.convert("RGB")
question = "What is the content of this image?"


# LLaVA-1.5
def run_llava(question):

    prompt = f"USER: <image>\n{question}\nASSISTANT:"

    llm = LLM(model="llava-hf/llava-1.5-7b-hf")
    stop_token_ids = None
    return llm, prompt, stop_token_ids


# LLaVA-1.6/LLaVA-NeXT
def run_llava_next(question):

    prompt = f"[INST] <image>\n{question} [/INST]"
    llm = LLM(model="llava-hf/llava-v1.6-mistral-7b-hf")
    stop_token_ids = None
    return llm, prompt, stop_token_ids


# Fuyu
def run_fuyu(question):

    prompt = f"{question}\n"
    llm = LLM(model="adept/fuyu-8b")
    stop_token_ids = None
    return llm, prompt, stop_token_ids


# Phi-3-Vision
def run_phi3v(question):

    prompt = f"<|user|>\n<|image_1|>\n{question}<|end|>\n<|assistant|>\n"  # noqa: E501
    # Note: The default settings of max_num_seqs (256) and
    # max_model_len (128k) for this model may cause OOM.
    # You may lower either to run this example on lower-end GPUs.
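    # (For instance, on a smaller GPU you could pass something like
    # max_model_len=4096 together with a lower max_num_seqs to the LLM
    # constructor below; the exact values depend on your hardware.)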

    # In this example, we override max_num_seqs to 5 while
    # keeping the original context length of 128k.
    llm = LLM(
        model="microsoft/Phi-3-vision-128k-instruct",
        trust_remote_code=True,
        max_num_seqs=5,
    )
    stop_token_ids = None
    return llm, prompt, stop_token_ids


# PaliGemma
def run_paligemma(question):

    # PaliGemma has a special prompt format for VQA; here we simply ask the
    # model to caption the image in English, so the question argument is
    # not used.
    prompt = "caption en"
    llm = LLM(model="google/paligemma-3b-mix-224")
    stop_token_ids = None
    return llm, prompt, stop_token_ids


# Chameleon
def run_chameleon(question):

    prompt = f"{question}<image>"
    llm = LLM(model="facebook/chameleon-7b")
    stop_token_ids = None
    return llm, prompt, stop_token_ids


# MiniCPM-V
def run_minicpmv(question):

    # 2.0
    # The official repo doesn't work yet, so we need to use a fork for now.
    # For more details, please see:
    # https://github.com/vllm-project/vllm/pull/4087#issuecomment-2250397630  # noqa
    # model_name = "HwwwH/MiniCPM-V-2"

    # 2.5
    # model_name = "openbmb/MiniCPM-Llama3-V-2_5"

    # 2.6
    model_name = "openbmb/MiniCPM-V-2_6"
    tokenizer = AutoTokenizer.from_pretrained(model_name,
                                              trust_remote_code=True)
    llm = LLM(
        model=model_name,
        trust_remote_code=True,
    )
    # NOTE: The stop_token_ids are different for various versions of MiniCPM-V
    # 2.0
    # stop_token_ids = [tokenizer.eos_id]

    # 2.5
    # stop_token_ids = [tokenizer.eos_id, tokenizer.eot_id]

    # 2.6
    stop_tokens = ['<|im_end|>', '<|endoftext|>']
    stop_token_ids = [tokenizer.convert_tokens_to_ids(i) for i in stop_tokens]

    messages = [{
        'role': 'user',
        'content': f'(<image>./</image>)\n{question}'
    }]
    prompt = tokenizer.apply_chat_template(messages,
                                           tokenize=False,
                                           add_generation_prompt=True)
    return llm, prompt, stop_token_ids


# InternVL
def run_internvl(question):
    model_name = "OpenGVLab/InternVL2-2B"

    llm = LLM(
        model=model_name,
        trust_remote_code=True,
        max_num_seqs=5,
    )

    tokenizer = AutoTokenizer.from_pretrained(model_name,
                                              trust_remote_code=True)
    messages = [{'role': 'user', 'content': f"<image>\n{question}"}]
    prompt = tokenizer.apply_chat_template(messages,
                                           tokenize=False,
                                           add_generation_prompt=True)

    # Stop tokens for InternVL:
    # model variants may have different stop tokens,
    # so please refer to the model card for the correct "stop words":
    # https://huggingface.co/OpenGVLab/InternVL2-2B#service
    stop_tokens = ["<|endoftext|>", "<|im_start|>", "<|im_end|>", "<|end|>"]
    stop_token_ids = [tokenizer.convert_tokens_to_ids(i) for i in stop_tokens]
    return llm, prompt, stop_token_ids


# BLIP-2
def run_blip2(question):

    # The BLIP-2 prompt format shown on the HuggingFace model repository is
    # inaccurate. See:
    # https://huggingface.co/Salesforce/blip2-opt-2.7b/discussions/15#64ff02f3f8cf9e4f5b038262  # noqa
    prompt = f"Question: {question} Answer:"
    llm = LLM(model="Salesforce/blip2-opt-2.7b")
    stop_token_ids = None
    return llm, prompt, stop_token_ids


# Qwen
def run_qwen_vl(question):

    llm = LLM(
        model="Qwen/Qwen-VL",
        trust_remote_code=True,
        max_num_seqs=5,
    )

    prompt = f"{question}Picture 1: <img></img>\n"
    stop_token_ids = None
    return llm, prompt, stop_token_ids


model_example_map = {
    "llava": run_llava,
    "llava-next": run_llava_next,
    "fuyu": run_fuyu,
    "phi3_v": run_phi3v,
    "paligemma": run_paligemma,
    "chameleon": run_chameleon,
    "minicpmv": run_minicpmv,
    "blip-2": run_blip2,
    "internvl_chat": run_internvl,
    "qwen_vl": run_qwen_vl,
}


def main(args):
    model = args.model_type
    if model not in model_example_map:
        raise ValueError(f"Model type {model} is not supported.")

    llm, prompt, stop_token_ids = model_example_map[model](question)

    # We set temperature to 0.2 so that outputs can be different
    # even when all prompts are identical during batch inference.
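    # (A temperature of 0 would instead make sampling greedy, so every output
    # in the batch would be identical.)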
    sampling_params = SamplingParams(temperature=0.2,
                                     max_tokens=64,
                                     stop_token_ids=stop_token_ids)

    assert args.num_prompts > 0
    if args.num_prompts == 1:
        # Single inference
        inputs = {
            "prompt": prompt,
            "multi_modal_data": {
                "image": image
            },
        }

    else:
        # Batch inference
        inputs = [{
            "prompt": prompt,
            "multi_modal_data": {
                "image": image
            },
        } for _ in range(args.num_prompts)]
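    # Note: every request above reuses the same prompt and image purely for
    # demonstration; each dict could just as well carry its own "prompt" and
    # "multi_modal_data" entry.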

    outputs = llm.generate(inputs, sampling_params=sampling_params)

    # llm.generate returns one RequestOutput per input request; print the text
    # of the first candidate completion for each.
    for o in outputs:
        generated_text = o.outputs[0].text
        print(generated_text)


if __name__ == "__main__":
    parser = FlexibleArgumentParser(
        description='Demo on using vLLM for offline inference with '
        'vision language models')
    parser.add_argument('--model-type',
                        '-m',
                        type=str,
                        default="llava",
                        choices=model_example_map.keys(),
                        help='Huggingface "model_type".')
    parser.add_argument('--num-prompts',
                        type=int,
                        default=1,
                        help='Number of prompts to run.')

    args = parser.parse_args()
    main(args)