Letta.letta.server.server - ERROR - Error in server._step: API call got non-200 response code (code=500, msg={"error":"llama runner process has terminated: exit status 2"}) for address: http://localhost:11434/api/generate. Make sure that the ollama API server is running and reachable at http://localhost:11434/api/generate.
Traceback (most recent call last):
  File "C:\Users\akidra\AppData\Roaming\Python\Python311\site-packages\letta\server\server.py", line 450, in _step
    usage_stats = letta_agent.step(
  File "C:\Users\akidra\AppData\Roaming\Python\Python311\site-packages\letta\agent.py", line 910, in step
    step_response = self.inner_step(
  File "C:\Users\akidra\AppData\Roaming\Python\Python311\site-packages\letta\agent.py", line 1111, in inner_step
    raise e
  File "C:\Users\akidra\AppData\Roaming\Python\Python311\site-packages\letta\agent.py", line 1026, in inner_step
    response = self._get_ai_reply(
  File "C:\Users\akidra\AppData\Roaming\Python\Python311\site-packages\letta\agent.py", line 650, in _get_ai_reply
    raise e
  File "C:\Users\akidra\AppData\Roaming\Python\Python311\site-packages\letta\agent.py", line 613, in _get_ai_reply
    response = create(
  File "C:\Users\akidra\AppData\Roaming\Python\Python311\site-packages\letta\llm_api\llm_api_tools.py", line 100, in wrapper
    raise e
  File "C:\Users\akidra\AppData\Roaming\Python\Python311\site-packages\letta\llm_api\llm_api_tools.py", line 69, in wrapper
    return func(*args, **kwargs)
  File "C:\Users\akidra\AppData\Roaming\Python\Python311\site-packages\letta\llm_api\llm_api_tools.py", line 389, in create
    return get_chat_completion(
  File "C:\Users\akidra\AppData\Roaming\Python\Python311\site-packages\letta\local_llm\chat_completion_proxy.py", line 167, in get_chat_completion
    result, usage = get_ollama_completion(endpoint, auth_type, auth_key, model, prompt, context_window)
  File "C:\Users\akidra\AppData\Roaming\Python\Python311\site-packages\letta\local_llm\ollama\api.py", line 68, in get_ollama_completion
    raise Exception(
Exception: API call got non-200 response code (code=500, msg={"error":"llama runner process has terminated: exit status 2"}) for address: http://localhost:11434/api/generate. Make sure that the ollama API server is running and reachable at http://localhost:11434/api/generate.
None

---------------------------------------------------------------------------
Exception                                 Traceback (most recent call last)
Cell In[50], line 23
      6 agent_state = client.create_agent(
      7     llm_config=LLMConfig(
      8         model="qwen2.5:0.5b",
   (...)
     19     )
     20 )
     22 # Message an agent
---> 23 response = client.send_message(
     24     agent_id=agent_state.id,
     25     role="user",
     26     message="hello"
     27 )
     28 print("Usage", response.usage)
     29 print("Agent messages", response.messages)

File ~\AppData\Roaming\Python\Python311\site-packages\letta\client\client.py:2488, in LocalClient.send_message(self, message, role, name, agent_id, agent_name, stream_steps, stream_tokens)
   2485     raise NotImplementedError
   2486 self.interface.clear()
-> 2488 usage = self.server.send_messages(
   2489     actor=self.user,
   2490     agent_id=agent_id,
   2491     messages=[MessageCreate(role=MessageRole(role), text=message, name=name)],
   2492 )
   2494 ## TODO: need to make sure date/timestamp is propely passed
   2495 ## TODO: update self.interface.to_list() to return actual Message objects
   2496 ## here, the message objects will have faulty created_by timestamps
   (...)
   2504
   2505 # format messages
   2506 messages = self.interface.to_list()

File ~\AppData\Roaming\Python\Python311\site-packages\letta\server\server.py:761, in SyncServer.send_messages(self, actor, agent_id, messages, wrap_user_message, wrap_system_message, interface)
    758     raise ValueError(f"All messages must be of type Message or MessageCreate, got {[type(message) for message in messages]}")
    760 # Run the agent state forward
--> 761 return self._step(actor=actor, agent_id=agent_id, input_messages=message_objects, interface=interface)

File ~\AppData\Roaming\Python\Python311\site-packages\letta\server\server.py:450, in SyncServer._step(self, actor, agent_id, input_messages, interface)
    447 token_streaming = letta_agent.interface.streaming_mode if hasattr(letta_agent.interface, "streaming_mode") else False
    449 logger.debug(f"Starting agent step")
--> 450 usage_stats = letta_agent.step(
    451     messages=input_messages,
    452     chaining=self.chaining,
    453     max_chaining_steps=self.max_chaining_steps,
    454     stream=token_streaming,
    455     skip_verify=True,
    456 )
    458 # save agent after step
    459 save_agent(letta_agent)

File ~\AppData\Roaming\Python\Python311\site-packages\letta\agent.py:910, in Agent.step(self, messages, chaining, max_chaining_steps, **kwargs)
    908 kwargs["first_message"] = False
    909 kwargs["step_count"] = step_count
--> 910 step_response = self.inner_step(
    911     messages=next_input_message,
    912     **kwargs,
    913 )
    914 heartbeat_request = step_response.heartbeat_request
    915 function_failed = step_response.function_failed

File ~\AppData\Roaming\Python\Python311\site-packages\letta\agent.py:1111, in Agent.inner_step(self, messages, first_message, first_message_retry_limit, skip_verify, stream, step_count)
   1109 else:
   1110     printd(f"step() failed with an unrecognized exception: '{str(e)}'")
-> 1111 raise e

File ~\AppData\Roaming\Python\Python311\site-packages\letta\agent.py:1026, in Agent.inner_step(self, messages, first_message, first_message_retry_limit, skip_verify, stream, step_count)
   1023     raise Exception(f"Hit first message retry limit ({first_message_retry_limit})")
   1025 else:
-> 1026     response = self._get_ai_reply(
   1027         message_sequence=input_message_sequence,
   1028         first_message=first_message,
   1029         stream=stream,
   1030         step_count=step_count,
   1031     )
   1033 # Step 3: check if LLM wanted to call a function
   1034 # (if yes) Step 4: call the function
   1035 # (if yes) Step 5: send the info on the function call and function response to LLM
   1036 response_message = response.choices[0].message

File ~\AppData\Roaming\Python\Python311\site-packages\letta\agent.py:650, in Agent._get_ai_reply(self, message_sequence, function_call, first_message, stream, empty_response_retry_limit, backoff_factor, max_delay, step_count)
    646     time.sleep(delay)
    648 except Exception as e:
    649     # For non-retryable errors, exit immediately
--> 650     raise e
    652 raise Exception("Retries exhausted and no valid response received.")

File ~\AppData\Roaming\Python\Python311\site-packages\letta\agent.py:613, in Agent._get_ai_reply(self, message_sequence, function_call, first_message, stream, empty_response_retry_limit, backoff_factor, max_delay, step_count)
    611 for attempt in range(1, empty_response_retry_limit + 1):
    612     try:
--> 613         response = create(
    614             llm_config=self.agent_state.llm_config,
    615             messages=message_sequence,
    616             user_id=self.agent_state.created_by_id,
    617             functions=allowed_functions,
    618             # functions_python=self.functions_python, do we need this?
    619             function_call=function_call,
    620             first_message=first_message,
    621             force_tool_call=force_tool_call,
    622             stream=stream,
    623             stream_interface=self.interface,
    624         )
    626         # These bottom two are retryable
    627         if len(response.choices) == 0 or response.choices[0] is None:

File ~\AppData\Roaming\Python\Python311\site-packages\letta\llm_api\llm_api_tools.py:100, in retry_with_exponential_backoff.<locals>.wrapper(*args, **kwargs)
     98 # Raise exceptions for any errors not specified
     99 except Exception as e:
--> 100     raise e

File ~\AppData\Roaming\Python\Python311\site-packages\letta\llm_api\llm_api_tools.py:69, in retry_with_exponential_backoff.<locals>.wrapper(*args, **kwargs)
     67 while True:
     68     try:
---> 69         return func(*args, **kwargs)
     71     except requests.exceptions.HTTPError as http_err:
     73         if not hasattr(http_err, "response") or not http_err.response:

File ~\AppData\Roaming\Python\Python311\site-packages\letta\llm_api\llm_api_tools.py:389, in create(llm_config, messages, user_id, functions, functions_python, function_call, first_message, force_tool_call, use_tool_naming, stream, stream_interface, max_tokens, model_settings)
    387 if stream:
    388     raise NotImplementedError(f"Streaming not yet implemented for {llm_config.model_endpoint_type}")
--> 389 return get_chat_completion(
    390     model=llm_config.model,
    391     messages=messages,
    392     functions=functions,
    393     functions_python=functions_python,
    394     function_call=function_call,
    395     context_window=llm_config.context_window,
    396     endpoint=llm_config.model_endpoint,
    397     endpoint_type=llm_config.model_endpoint_type,
    398     wrapper=llm_config.model_wrapper,
    399     user=str(user_id),
    400     # hint
    401     first_message=first_message,
    402     # auth-related
    403     auth_type=model_settings.openllm_auth_type,
    404     auth_key=model_settings.openllm_api_key,
    405 )

File ~\AppData\Roaming\Python\Python311\site-packages\letta\local_llm\chat_completion_proxy.py:167, in get_chat_completion(model, messages, functions, functions_python, function_call, context_window, user, wrapper, endpoint, endpoint_type, function_correction, first_message, auth_type, auth_key)
    165     result, usage = get_koboldcpp_completion(endpoint, auth_type, auth_key, prompt, context_window, grammar=grammar)
    166 elif endpoint_type == "ollama":
--> 167     result, usage = get_ollama_completion(endpoint, auth_type, auth_key, model, prompt, context_window)
    168 elif endpoint_type == "vllm":
    169     result, usage = get_vllm_completion(endpoint, auth_type, auth_key, model, prompt, context_window, user)

File ~\AppData\Roaming\Python\Python311\site-packages\letta\local_llm\ollama\api.py:68, in get_ollama_completion(endpoint, auth_type, auth_key, model, prompt, context_window, grammar)
     66     result = result_full["response"]
     67 else:
---> 68     raise Exception(
     69         f"API call got non-200 response code (code={response.status_code}, msg={response.text}) for address: {URI}."
     70         + f" Make sure that the ollama API server is running and reachable at {URI}."
     71     )
     73 except:
     74     # TODO handle gracefully
     75     raise

Exception: API call got non-200 response code (code=500, msg={"error":"llama runner process has terminated: exit status 2"}) for address: http://localhost:11434/api/generate. Make sure that the ollama API server is running and reachable at http://localhost:11434/api/generate.
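Note that the exception raised in letta/local_llm/ollama/api.py is just Letta surfacing the HTTP 500 it received: the underlying failure is the llama runner process crashing inside Ollama ("exit status 2"). One way to confirm this is to reproduce the request directly against the Ollama endpoint, bypassing Letta entirely. A minimal sketch (the payload uses Ollama's standard /api/generate fields; the model name is the one from the traceback):

```python
import requests

# Hit the same endpoint Letta calls, with the same model.
# If this also returns a 500 with "llama runner process has terminated",
# the crash is inside Ollama, not in Letta.
resp = requests.post(
    "http://localhost:11434/api/generate",
    json={
        "model": "qwen2.5:0.5b",  # model from the traceback
        "prompt": "hello",
        "stream": False,
    },
)
print(resp.status_code, resp.text)
```

If this direct call fails the same way, the Ollama server log is the place to look rather than the Letta stack trace above.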
Bug description
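Messaging a Letta agent configured to use a local Ollama model (qwen2.5:0.5b) fails: the request Letta sends to http://localhost:11434/api/generate comes back with HTTP 500 and {"error":"llama runner process has terminated: exit status 2"}, and client.send_message raises the exception shown above. Steps to reproduce follow.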
Install
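The traceback paths (C:\Users\akidra\AppData\Roaming\Python\Python311\site-packages\letta\...) show Letta installed with pip into the per-user site-packages of Python 3.11 on Windows, presumably via `pip install letta`.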
Agent setting
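The notebook cell in the traceback truncates the agent configuration, so the following is a reconstruction rather than the exact setting: the field names are the ones the create() call reads off llm_config in the trace (model, model_endpoint, model_endpoint_type, model_wrapper, context_window); the endpoint, wrapper, and context_window values are assumptions.

```python
from letta import create_client, LLMConfig

client = create_client()

# Reconstructed from the truncated cell in the traceback; values marked
# "assumed" were not visible in the trace.
agent_state = client.create_agent(
    llm_config=LLMConfig(
        model="qwen2.5:0.5b",                     # from the traceback
        model_endpoint_type="ollama",             # routes to get_ollama_completion
        model_endpoint="http://localhost:11434",  # assumed; matches the failing URL
        model_wrapper=None,                       # assumed
        context_window=8192,                      # assumed; set to the model's real window
    ),
)
```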
Launch ollama
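Before launching the agent it is worth verifying that the server is actually up and that the model has been pulled. A small check against Ollama's standard /api/tags endpoint (a sketch, not part of the original report):

```python
import requests

# List the models the local Ollama server has available.
resp = requests.get("http://localhost:11434/api/tags")
resp.raise_for_status()
print([m["name"] for m in resp.json().get("models", [])])
# the list should include "qwen2.5:0.5b"
```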
Launch agent
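The messaging half of the cell is fully visible in the traceback:

```python
# Message an agent (cell lines 22-29 in the traceback)
response = client.send_message(
    agent_id=agent_state.id,
    role="user",
    message="hello",
)
print("Usage", response.usage)
print("Agent messages", response.messages)
```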
Response
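send_message never returns a response: the server logs the 500 from /api/generate and the client raises the Exception reproduced in the tracebacks above.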