1+ #!/usr/bin/env python
2+ # -*- coding: utf-8 -*-
3+ # @Desc : refs to openai 0.x sdk
4+
15import asyncio
26import json
37import os
# Has one attribute per thread, 'session'.
_thread_context = threading.local()

# Console log level for API request/response tracing ("debug" or "info").
# BUG FIX: the original read LLM_LOG from the environment and then
# unconditionally overwrote it with "debug" (a leftover debugging override),
# so the environment variable was dead. Honor the environment value and
# fall back to "debug" when it is unset, preserving the previous default.
LLM_LOG = os.environ.get("LLM_LOG", "debug")
4852
4953
5054class ApiType (Enum ):
@@ -74,8 +78,8 @@ def from_str(label):
7478
7579
def _console_log_level():
    """Return the active console log level ("debug" or "info"), or None when
    console logging is disabled."""
    return LLM_LOG if LLM_LOG in ("debug", "info") else None
8185
@@ -140,7 +144,7 @@ def operation_location(self) -> Optional[str]:
140144
141145 @property
142146 def organization (self ) -> Optional [str ]:
143- return self ._headers .get ("OpenAI -Organization" )
147+ return self ._headers .get ("LLM -Organization" )
144148
145149 @property
146150 def response_ms (self ) -> Optional [int ]:
@@ -478,7 +482,7 @@ def handle_error_response(self, rbody, rcode, resp, rheaders, stream_error=False
478482 error_data ["message" ] += "\n \n " + error_data ["internal_message" ]
479483
480484 log_info (
481- "OpenAI API error received" ,
485+ "LLM API error received" ,
482486 error_code = error_data .get ("code" ),
483487 error_type = error_data .get ("type" ),
484488 error_message = error_data .get ("message" ),
@@ -516,7 +520,7 @@ def handle_error_response(self, rbody, rcode, resp, rheaders, stream_error=False
516520 )
517521
518522 def request_headers (self , method : str , extra , request_id : Optional [str ]) -> Dict [str , str ]:
519- user_agent = "OpenAI /v1 PythonBindings/%s" % (version .VERSION ,)
523+ user_agent = "LLM /v1 PythonBindings/%s" % (version .VERSION ,)
520524
521525 uname_without_node = " " .join (v for k , v in platform .uname ()._asdict ().items () if k != "node" )
522526 ua = {
@@ -530,17 +534,17 @@ def request_headers(self, method: str, extra, request_id: Optional[str]) -> Dict
530534 }
531535
532536 headers = {
533- "X-OpenAI -Client-User-Agent" : json .dumps (ua ),
537+ "X-LLM -Client-User-Agent" : json .dumps (ua ),
534538 "User-Agent" : user_agent ,
535539 }
536540
537541 headers .update (api_key_to_header (self .api_type , self .api_key ))
538542
539543 if self .organization :
540- headers ["OpenAI -Organization" ] = self .organization
544+ headers ["LLM -Organization" ] = self .organization
541545
542546 if self .api_version is not None and self .api_type == ApiType .OPEN_AI :
543- headers ["OpenAI -Version" ] = self .api_version
547+ headers ["LLM -Version" ] = self .api_version
544548 if request_id is not None :
545549 headers ["X-Request-Id" ] = request_id
546550 headers .update (extra )
@@ -592,15 +596,14 @@ def _prepare_request_raw(
592596 headers ["Content-Type" ] = "application/json"
593597 else :
594598 raise openai .APIConnectionError (
595- "Unrecognized HTTP method %r. This may indicate a bug in the "
596- "OpenAI bindings. Please contact us through our help center at help.openai.com for "
597- "assistance." % (method ,)
599+ message = f"Unrecognized HTTP method { method } . This may indicate a bug in the LLM bindings." ,
600+ request = None ,
598601 )
599602
600603 headers = self .request_headers (method , headers , request_id )
601604
602- log_debug ("Request to OpenAI API" , method = method , path = abs_url )
603- log_debug ("Post details" , data = data , api_version = self .api_version )
605+ # log_debug("Request to LLM API", method=method, path=abs_url)
606+ # log_debug("Post details", data=data, api_version=self.api_version)
604607
605608 return abs_url , headers , data
606609
@@ -639,14 +642,14 @@ def request_raw(
639642 except requests .exceptions .Timeout as e :
640643 raise openai .APITimeoutError ("Request timed out: {}" .format (e )) from e
641644 except requests .exceptions .RequestException as e :
642- raise openai .APIConnectionError ("Error communicating with OpenAI : {}" .format (e )) from e
643- log_debug (
644- "OpenAI API response" ,
645- path = abs_url ,
646- response_code = result .status_code ,
647- processing_ms = result .headers .get ("OpenAI -Processing-Ms" ),
648- request_id = result .headers .get ("X-Request-Id" ),
649- )
645+ raise openai .APIConnectionError (message = "Error communicating with LLM : {}" .format (e ), request = None ) from e
646+ # log_debug(
647+ # "LLM API response",
648+ # path=abs_url,
649+ # response_code=result.status_code,
650+ # processing_ms=result.headers.get("LLM -Processing-Ms"),
651+ # request_id=result.headers.get("X-Request-Id"),
652+ # )
650653 return result
651654
652655 async def arequest_raw (
@@ -685,18 +688,18 @@ async def arequest_raw(
685688 }
686689 try :
687690 result = await session .request (** request_kwargs )
688- log_info (
689- "OpenAI API response" ,
690- path = abs_url ,
691- response_code = result .status ,
692- processing_ms = result .headers .get ("OpenAI -Processing-Ms" ),
693- request_id = result .headers .get ("X-Request-Id" ),
694- )
691+ # log_info(
692+ # "LLM API response",
693+ # path=abs_url,
694+ # response_code=result.status,
695+ # processing_ms=result.headers.get("LLM -Processing-Ms"),
696+ # request_id=result.headers.get("X-Request-Id"),
697+ # )
695698 return result
696699 except (aiohttp .ServerTimeoutError , asyncio .TimeoutError ) as e :
697700 raise openai .APITimeoutError ("Request timed out" ) from e
698701 except aiohttp .ClientError as e :
699- raise openai .APIConnectionError ("Error communicating with OpenAI" ) from e
702+ raise openai .APIConnectionError (message = "Error communicating with LLM" , request = None ) from e
700703
701704 def _interpret_response (
702705 self , result : requests .Response , stream : bool
0 commit comments