System Info
llama stack 0.0.53
llama stack client for python 0.0.36
Information
The official example scripts
My own modified scripts
🐛 Describe the bug
When I try to run inference with Llama Stack on the multimodal model Llama-11B-Vision-Instruct, following the example ipynb code, the error below occurs.
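For context, this appears to be the plain httpx error that surfaces whenever the image URI has no scheme; a minimal standalone reproduction (the URL below is a placeholder of my own, not taken from the notebook):

import httpx

try:
    # A URI without "http://" or "https://" triggers the same exception seen in the logs below.
    httpx.get("www.example.com/cat.jpg")
except httpx.UnsupportedProtocol as e:
    print(e)  # Request URL is missing an 'http://' or 'https://' protocol.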
Error logs
Traceback (most recent call last):
File "/opt/conda/envs/llamastack-llama3.2/lib/python3.10/site-packages/httpx/_transports/default.py", line 72, in map_httpcore_exceptions
yield
File "/opt/conda/envs/llamastack-llama3.2/lib/python3.10/site-packages/httpx/_transports/default.py", line 377, in handle_async_request
resp = await self._pool.handle_async_request(req)
File "/opt/conda/envs/llamastack-llama3.2/lib/python3.10/site-packages/httpcore/_async/connection_pool.py", line 207, in handle_async_request
raise UnsupportedProtocol(
httpcore.UnsupportedProtocol: Request URL is missing an 'http://' or 'https://' protocol.
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/opt/conda/envs/llamastack-llama3.2/lib/python3.10/site-packages/llama_stack/distribution/server/server.py", line 210, in sse_generator
event_gen = await event_gen
File "/opt/conda/envs/llamastack-llama3.2/lib/python3.10/site-packages/llama_stack/distribution/routers/routers.py", line 102, in chat_completion
return (chunk async for chunk in await provider.chat_completion(**params))
File "/opt/conda/envs/llamastack-llama3.2/lib/python3.10/site-packages/llama_stack/providers/inline/inference/meta_reference/inference.py", line 220, in chat_completion
request = await request_with_localized_media(request)
File "/opt/conda/envs/llamastack-llama3.2/lib/python3.10/site-packages/llama_stack/providers/inline/inference/meta_reference/inference.py", line 421, in request_with_localized_media
m.content = await _convert_content(m.content)
File "/opt/conda/envs/llamastack-llama3.2/lib/python3.10/site-packages/llama_stack/providers/inline/inference/meta_reference/inference.py", line 415, in _convert_content
return [await _convert_single_content(c) for c in content]
File "/opt/conda/envs/llamastack-llama3.2/lib/python3.10/site-packages/llama_stack/providers/inline/inference/meta_reference/inference.py", line 415, in
return [await _convert_single_content(c) for c in content]
File "/opt/conda/envs/llamastack-llama3.2/lib/python3.10/site-packages/llama_stack/providers/inline/inference/meta_reference/inference.py", line 408, in _convert_single_content
url = await convert_image_media_to_url(content, download=True)
File "/opt/conda/envs/llamastack-llama3.2/lib/python3.10/site-packages/llama_stack/providers/utils/inference/prompt_adapter.py", line 77, in convert_image_media_to_url
r = await client.get(media.image.uri)
File "/opt/conda/envs/llamastack-llama3.2/lib/python3.10/site-packages/httpx/_client.py", line 1814, in get
return await self.request(
File "/opt/conda/envs/llamastack-llama3.2/lib/python3.10/site-packages/httpx/_client.py", line 1585, in request
return await self.send(request, auth=auth, follow_redirects=follow_redirects)
File "/opt/conda/envs/llamastack-llama3.2/lib/python3.10/site-packages/httpx/_client.py", line 1674, in send
response = await self._send_handling_auth(
File "/opt/conda/envs/llamastack-llama3.2/lib/python3.10/site-packages/httpx/_client.py", line 1702, in _send_handling_auth
response = await self._send_handling_redirects(
File "/opt/conda/envs/llamastack-llama3.2/lib/python3.10/site-packages/httpx/_client.py", line 1739, in _send_handling_redirects
response = await self._send_single_request(request)
File "/opt/conda/envs/llamastack-llama3.2/lib/python3.10/site-packages/httpx/_client.py", line 1776, in _send_single_request
response = await transport.handle_async_request(request)
File "/opt/conda/envs/llamastack-llama3.2/lib/python3.10/site-packages/httpx/_transports/default.py", line 376, in handle_async_request
with map_httpcore_exceptions():
File "/opt/conda/envs/llamastack-llama3.2/lib/python3.10/contextlib.py", line 153, in exit
self.gen.throw(typ, value, traceback)
File "/opt/conda/envs/llamastack-llama3.2/lib/python3.10/site-packages/httpx/_transports/default.py", line 89, in map_httpcore_exceptions
raise mapped_exc(message) from exc
httpx.UnsupportedProtocol: Request URL is missing an 'http://' or 'https://' protocol.
Expected behavior
The model should be able to run inference and describe an image.
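As a possible client-side workaround (just a sketch, assuming the failure comes from an image URI passed without a scheme; ensure_scheme is a hypothetical helper of my own, not part of llama-stack), I would normalize the URI before building the message:

from urllib.parse import urlparse

def ensure_scheme(uri: str, default_scheme: str = "https") -> str:
    # Prepend a scheme if the URI has none, so the server can actually download the image.
    return uri if urlparse(uri).scheme else f"{default_scheme}://{uri}"

print(ensure_scheme("www.example.com/dog.png"))         # https://www.example.com/dog.png
print(ensure_scheme("https://www.example.com/dog.png"))  # unchanged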