使用 NVIDIA 端点进行图像推理的多模态大型语言模型¶
在本笔记本中,我们将展示如何使用 NVIDIA 多模态大型语言模型类/抽象进行图像理解/推理。
我们还将展示 NVIDIA 大型语言模型目前支持的几个函数:
complete(同步和异步):用于单个提示和图像列表
stream_complete(同步和异步):用于 complete 的流式输出
In [ ]
已复制!
%pip install --upgrade --quiet llama-index-multi-modal-llms-nvidia llama-index-embeddings-nvidia llama-index-readers-file
%pip install --upgrade --quiet llama-index-multi-modal-llms-nvidia llama-index-embeddings-nvidia llama-index-readers-file
In [ ]
已复制!
import getpass
import os

# del os.environ['NVIDIA_API_KEY'] ## delete key and reset
# Prompt for a key only when a valid-looking one is not already exported.
current_key = os.environ.get("NVIDIA_API_KEY", "")
if not current_key.startswith("nvapi-"):
    candidate = getpass.getpass("NVAPI Key (starts with nvapi-): ")
    # NVIDIA API Catalog keys always carry the "nvapi-" prefix.
    assert candidate.startswith(
        "nvapi-"
    ), f"{candidate[:5]}... is not a valid key"
    os.environ["NVIDIA_API_KEY"] = candidate
else:
    print("Valid NVIDIA_API_KEY already in environment. Delete to reset")
import getpass import os # del os.environ['NVIDIA_API_KEY'] ## delete key and reset if os.environ.get("NVIDIA_API_KEY", "").startswith("nvapi-"): print("Valid NVIDIA_API_KEY already in environment. Delete to reset") else: nvapi_key = getpass.getpass("NVAPI Key(以 nvapi- 开头):") assert nvapi_key.startswith( "nvapi-" ), f"{nvapi_key[:5]}... is not a valid key" os.environ["NVIDIA_API_KEY"] = nvapi_key
In [ ]
已复制!
# Patch the running event loop so `await` works at notebook top level.
import nest_asyncio
nest_asyncio.apply()
import nest_asyncio nest_asyncio.apply()
In [ ]
已复制!
# Multi-modal LLM wrapper for NVIDIA API Catalog endpoints.
from llama_index.multi_modal_llms.nvidia import NVIDIAMultiModal
import base64
from llama_index.core.schema import ImageDocument
from PIL import Image
import requests
from io import BytesIO
# import matplotlib.pyplot as plt
from llama_index.core.multi_modal_llms.generic_utils import load_image_urls
# Uses the default model; reads NVIDIA_API_KEY from the environment.
llm = NVIDIAMultiModal()
from llama_index.multi_modal_llms.nvidia import NVIDIAMultiModal import base64 from llama_index.core.schema import ImageDocument from PIL import Image import requests from io import BytesIO # import matplotlib.pyplot as plt from llama_index.core.multi_modal_llms.generic_utils import load_image_urls llm = NVIDIAMultiModal()
初始化 NVIDIAMultiModal
并从 URL 加载图像¶
In [ ]
已复制!
# Publicly hosted sample images to reason over.
image_urls = [
    "https://res.cloudinary.com/hello-tickets/image/upload/c_limit,f_auto,q_auto,w_1920/v1640835927/o3pfl41q7m5bj8jardk0.jpg",
    "https://www.visualcapitalist.com/wp-content/uploads/2023/10/US_Mortgage_Rate_Surge-Sept-11-1.jpg",
    "https://www.sportsnet.ca/wp-content/uploads/2023/11/CP1688996471-1040x572.jpg",
    # Add yours here!
]

# Download the first image; its raw bytes are reused by the upload cell below.
first_url = image_urls[0]
img_response = requests.get(first_url)
img = Image.open(BytesIO(img_response.content))
# plt.imshow(img)

# Wrap every URL in an ImageDocument the LLM can consume.
image_url_documents = load_image_urls(image_urls)
image_urls = [ "https://res.cloudinary.com/hello-tickets/image/upload/c_limit,f_auto,q_auto,w_1920/v1640835927/o3pfl41q7m5bj8jardk0.jpg", "https://www.visualcapitalist.com/wp-content/uploads/2023/10/US_Mortgage_Rate_Surge-Sept-11-1.jpg", "https://www.sportsnet.ca/wp-content/uploads/2023/11/CP1688996471-1040x572.jpg", # Add yours here! ] img_response = requests.get(image_urls[0]) img = Image.open(BytesIO(img_response.content)) # plt.imshow(img) image_url_documents = load_image_urls(image_urls)
使用多张图像完成一个提示¶
In [ ]
已复制!
# Ask the model about all images at once (synchronous completion).
# Fix: dropped the pointless f-prefix — the prompt has no placeholders.
response = llm.complete(
    prompt="What is this image?",
    image_documents=image_url_documents,
)
print(response)
response = llm.complete( prompt=f"这是什么图像?", image_documents=image_url_documents, ) print(response)
In [ ]
已复制!
# Async variant of complete(); top-level await works thanks to nest_asyncio.
await llm.acomplete(
    prompt="tell me about this image",
    image_documents=image_url_documents,
)
await llm.acomplete( prompt="告诉我关于这张图像的信息", image_documents=image_url_documents, )
流式传输使用多张图像完成一个提示¶
In [ ]
已复制!
# Stream the completion as a synchronous generator of chunks.
# Fix: dropped the pointless f-prefix — the prompt has no placeholders.
stream_complete_response = llm.stream_complete(
    prompt="What is this image?",
    image_documents=image_url_documents,
)
stream_complete_response = llm.stream_complete( prompt=f"这是什么图像?", image_documents=image_url_documents, )
In [ ]
已复制!
# Print each streamed chunk's text as it arrives, without newlines.
for r in stream_complete_response:
    print(r.text, end="")
for r in stream_complete_response: print(r.text, end="")
In [ ]
已复制!
stream_complete_response = await llm.astream_complete(
prompt=f"What is this image?",
image_documents=image_url_documents,
)
stream_complete_response = await llm.astream_complete( prompt=f"这是什么图像?", image_documents=image_url_documents, )
In [ ]
已复制!
# Drain the async generator, keeping only the final yielded item.
last_element = None
async for last_element in stream_complete_response:
    pass
# NOTE(review): assumes the last chunk carries the full accumulated text — confirm.
print(last_element)
last_element = None async for last_element in stream_complete_response: pass print(last_element)
将图像作为 base64 编码字符串传递¶
In [ ]
已复制!
# Fetch a sample sketch and base64-encode it so it can be passed inline.
sketch_url = "https://helloartsy.com/wp-content/uploads/kids/cats/how-to-draw-a-small-cat/how-to-draw-a-small-cat-step-6.jpg"
raw_bytes = requests.get(sketch_url).content
imgr_content = base64.b64encode(raw_bytes).decode("utf-8")

# Send the encoded image directly as an ImageDocument payload.
llm.complete(
    prompt="List models in image",
    image_documents=[ImageDocument(image=imgr_content, mimetype="jpeg")],
)
imgr_content = base64.b64encode( requests.get( "https://helloartsy.com/wp-content/uploads/kids/cats/how-to-draw-a-small-cat/how-to-draw-a-small-cat-step-6.jpg" ).content ).decode("utf-8") llm.complete( prompt="列出图像中的模型", image_documents=[ImageDocument(image=imgr_content, mimetype="jpeg")], )
将图像作为 NVCF 资产传递¶
如果您的图像足够大,或者您将在聊天对话中多次传递它,您可以上传一次并在聊天对话中引用它
有关如何上传图像的详细信息,请参阅 https://docs.nvidia.com/cloud-functions/user-guide/latest/cloud-function/assets.html。
In [ ]
已复制!
import requests

# MIME type and description recorded with the NVCF asset.
content_type = "image/jpg"
description = "example-image-from-lc-nv-ai-e-notebook"
# Step 1: create the asset record; the response carries a presigned
# upload URL plus the new asset id.
create_response = requests.post(
    "https://api.nvcf.nvidia.com/v2/nvcf/assets",
    headers={
        "Authorization": f"Bearer {os.environ['NVIDIA_API_KEY']}",
        "accept": "application/json",
        "Content-Type": "application/json",
    },
    json={"contentType": content_type, "description": description},
)
create_response.raise_for_status()
# Step 2: PUT the raw image bytes (downloaded in an earlier cell)
# to the presigned upload URL.
upload_response = requests.put(
    create_response.json()["uploadUrl"],
    headers={
        "Content-Type": content_type,
        "x-amz-meta-nvcf-asset-description": description,
    },
    data=img_response.content,
)
upload_response.raise_for_status()
# Keep the asset id so later cells can reference the uploaded image.
asset_id = create_response.json()["assetId"]
asset_id
import requests content_type = "image/jpg" description = "example-image-from-lc-nv-ai-e-notebook" create_response = requests.post( "https://api.nvcf.nvidia.com/v2/nvcf/assets", headers={ "Authorization": f"Bearer {os.environ['NVIDIA_API_KEY']}", "accept": "application/json", "Content-Type": "application/json", }, json={"contentType": content_type, "description": description}, ) create_response.raise_for_status() upload_response = requests.put( create_response.json()["uploadUrl"], headers={ "Content-Type": content_type, "x-amz-meta-nvcf-asset-description": description, }, data=img_response.content, ) upload_response.raise_for_status() asset_id = create_response.json()["assetId"] asset_id
In [ ]
已复制!
# Stream a completion that references the uploaded NVCF asset by id
# (the asset id travels in ImageDocument metadata, not as inline bytes).
# Fix: dropped the pointless f-prefix — the prompt has no placeholders.
response = llm.stream_complete(
    prompt="Describe the image",
    image_documents=[
        ImageDocument(metadata={"asset_id": asset_id}, mimetype="png")
    ],
)
response = llm.stream_complete( prompt=f"描述这张图像", image_documents=[ ImageDocument(metadata={"asset_id": asset_id}, mimetype="png") ], )
In [ ]
已复制!
# Print each streamed chunk's text as it arrives.
for r in response:
    print(r.text, end="")
for r in response: print(r.text, end="")
从本地文件传递图像¶
In [ ]
已复制!
from llama_index.core import SimpleDirectoryReader

# Put your local directory here
image_documents = SimpleDirectoryReader("./tests/data/").load_data()
llm.complete(
    prompt="Describe the images as an alternative text",
    image_documents=image_documents,
)
from llama_index.core import SimpleDirectoryReader # 在此处放入你的本地目录 image_documents = SimpleDirectoryReader("./tests/data/").load_data() llm.complete( prompt="将图像描述为替代文本", image_documents=image_documents, )
与图像聊天¶
In [ ]
已复制!
from llama_index.core.llms import ChatMessage

# Chat with an image referenced by plain URL in the message content parts.
llm.chat(
    [
        ChatMessage(
            role="user",
            content=[
                {"type": "text", "text": "Describe this image:"},
                {"type": "image_url", "image_url": image_urls[1]},
            ],
        )
    ]
)
from llama_index.core.llms import ChatMessage llm.chat( [ ChatMessage( role="user", content=[ {"type": "text", "text": "描述这张图像:"}, {"type": "image_url", "image_url": image_urls[1]}, ], ) ] )
In [ ]
已复制!
from llama_index.core.llms import ChatMessage

# Async variant of the URL-based image chat above.
await llm.achat(
    [
        ChatMessage(
            role="user",
            content=[
                {"type": "text", "text": "Describe this image:"},
                {"type": "image_url", "image_url": image_urls[1]},
            ],
        )
    ]
)
from llama_index.core.llms import ChatMessage await llm.achat( [ ChatMessage( role="user", content=[ {"type": "text", "text": "描述这张图像:"}, {"type": "image_url", "image_url": image_urls[1]}, ], ) ] )
In [ ]
已复制!
# Chat referencing the uploaded NVCF asset: image_url carries an HTML <img>
# tag whose data URI uses the ";asset_id," scheme instead of inline bytes.
llm.chat(
    [
        ChatMessage(
            role="user",
            content=[
                {"type": "text", "text": "Describe the image"},
                {
                    "type": "image_url",
                    "image_url": f'<img src="data:{content_type};asset_id,{asset_id}" />',
                },
            ],
        )
    ]
)
llm.chat( [ ChatMessage( role="user", content=[ {"type": "text", "text": "描述这张图像"}, { "type": "image_url", "image_url": f'&lt;img src="data:{content_type};asset_id,{asset_id}" /&gt;', }, ], ) ] )
In [ ]
已复制!
# Async variant of the NVCF-asset chat: the data URI's ";asset_id," scheme
# points at the previously uploaded asset instead of inline bytes.
await llm.achat(
    [
        ChatMessage(
            role="user",
            content=[
                {"type": "text", "text": "Describe the image"},
                {
                    "type": "image_url",
                    "image_url": f'<img src="data:{content_type};asset_id,{asset_id}" />',
                },
            ],
        )
    ]
)
await llm.achat( [ ChatMessage( role="user", content=[ {"type": "text", "text": "描述这张图像"}, { "type": "image_url", "image_url": f'&lt;img src="data:{content_type};asset_id,{asset_id}" /&gt;', }, ], ) ] )
流式聊天一个带有图像的提示¶
In [ ]
已复制!
from llama_index.core.llms import ChatMessage

# Synchronous streaming chat; returns a generator of response deltas.
streaming_resp = llm.stream_chat(
    [
        ChatMessage(
            role="user",
            content=[
                {"type": "text", "text": "Describe this image:"},
                {"type": "image_url", "image_url": image_urls[1]},
            ],
        )
    ]
)
from llama_index.core.llms import ChatMessage streaming_resp = llm.stream_chat( [ ChatMessage( role="user", content=[ {"type": "text", "text": "描述这张图像:"}, {"type": "image_url", "image_url": image_urls[1]}, ], ) ] )
In [ ]
已复制!
# Print the incremental delta of each streamed chat chunk.
for r in streaming_resp:
    print(r.delta, end="")
for r in streaming_resp: print(r.delta, end="")
In [ ]
已复制!
from llama_index.core.llms import ChatMessage

# Async streaming chat; returns an async generator of response chunks.
resp = await llm.astream_chat(
    [
        ChatMessage(
            role="user",
            content=[
                {"type": "text", "text": "Describe this image:"},
                {"type": "image_url", "image_url": image_urls[0]},
            ],
        )
    ]
)
from llama_index.core.llms import ChatMessage resp = await llm.astream_chat( [ ChatMessage( role="user", content=[ {"type": "text", "text": "描述这张图像:"}, {"type": "image_url", "image_url": image_urls[0]}, ], ) ] )
In [ ]
已复制!
# Drain the async generator, keeping only the final yielded item.
last_element = None
async for last_element in resp:
    pass
# NOTE(review): assumes the last chunk carries the full accumulated message — confirm.
print(last_element)
last_element = None async for last_element in resp: pass print(last_element)
In [ ]
已复制!
# Stream a chat that references the uploaded NVCF asset inline via an HTML
# <img> tag. Fix: the data URI was garbled during export — restored the
# ";asset_id," scheme separator (matching the format used in earlier cells).
response = llm.stream_chat(
    [
        ChatMessage(
            role="user",
            content=f"""<img src="data:image/jpg;asset_id,{asset_id}"/>""",
        )
    ]
)
response = llm.stream_chat( [ ChatMessage( role="user", content=f"""&lt;img src="data:image/jpg;asset_id,{asset_id}"/&gt;""", ) ] )
In [ ]
已复制!
# Print the incremental delta of each streamed chat chunk.
for r in response:
    print(r.delta, end="")
for r in response: print(r.delta, end="")