🐛 Bug: Fix the bug that prevented the OpenAI API key from being used normally in Zed.
Browse files
- response.py (+5 −5)
response.py
CHANGED
|
@@ -32,7 +32,7 @@ async def generate_sse_response(timestamp, model, content=None, tools_id=None, f
|
|
| 32 |
json_data = json.dumps(sample_data, ensure_ascii=False)
|
| 33 |
|
| 34 |
# 构建SSE响应
|
| 35 |
-
sse_response = f"data: {json_data}\n\r"
|
| 36 |
|
| 37 |
return sse_response
|
| 38 |
|
|
@@ -90,7 +90,7 @@ async def fetch_gemini_response_stream(client, url, headers, payload, model):
|
|
| 90 |
function_full_response = json.dumps(function_call["functionCall"]["args"])
|
| 91 |
sse_string = await generate_sse_response(timestamp, model, content=None, tools_id="chatcmpl-9inWv0yEtgn873CxMBzHeCeiHctTV", function_call_name=None, function_call_content=function_full_response)
|
| 92 |
yield sse_string
|
| 93 |
-
yield "data: [DONE]\n\r"
|
| 94 |
|
| 95 |
async def fetch_vertex_claude_response_stream(client, url, headers, payload, model):
|
| 96 |
timestamp = datetime.timestamp(datetime.now())
|
|
@@ -137,7 +137,7 @@ async def fetch_vertex_claude_response_stream(client, url, headers, payload, mod
|
|
| 137 |
function_full_response = json.dumps(function_call["input"])
|
| 138 |
sse_string = await generate_sse_response(timestamp, model, content=None, tools_id=function_call_id, function_call_name=None, function_call_content=function_full_response)
|
| 139 |
yield sse_string
|
| 140 |
-
yield "data: [DONE]\n\r"
|
| 141 |
|
| 142 |
async def fetch_gpt_response_stream(client, url, headers, payload, max_redirects=5):
|
| 143 |
redirect_count = 0
|
|
@@ -174,7 +174,7 @@ async def fetch_gpt_response_stream(client, url, headers, payload, max_redirects
|
|
| 174 |
line, buffer = buffer.split("\n", 1)
|
| 175 |
# logger.info("line: %s", repr(line))
|
| 176 |
if line and line != "data: " and line != "data:" and not line.startswith(": "):
|
| 177 |
-
yield line + "\n\r"
|
| 178 |
except httpx.RemoteProtocolError as e:
|
| 179 |
yield {"error": f"fetch_gpt_response_stream RemoteProtocolError {e.__class__.__name__}", "details": str(e)}
|
| 180 |
return
|
|
@@ -236,7 +236,7 @@ async def fetch_claude_response_stream(client, url, headers, payload, model):
|
|
| 236 |
function_call_content = delta["partial_json"]
|
| 237 |
sse_string = await generate_sse_response(timestamp, model, None, None, None, function_call_content)
|
| 238 |
yield sse_string
|
| 239 |
-
yield "data: [DONE]\n\r"
|
| 240 |
|
| 241 |
async def fetch_response(client, url, headers, payload):
|
| 242 |
response = await client.post(url, headers=headers, json=payload)
|
|
|
|
| 32 |
json_data = json.dumps(sample_data, ensure_ascii=False)
|
| 33 |
|
| 34 |
# 构建SSE响应
|
| 35 |
+
sse_response = f"data: {json_data}\n\r\n"
|
| 36 |
|
| 37 |
return sse_response
|
| 38 |
|
|
|
|
| 90 |
function_full_response = json.dumps(function_call["functionCall"]["args"])
|
| 91 |
sse_string = await generate_sse_response(timestamp, model, content=None, tools_id="chatcmpl-9inWv0yEtgn873CxMBzHeCeiHctTV", function_call_name=None, function_call_content=function_full_response)
|
| 92 |
yield sse_string
|
| 93 |
+
yield "data: [DONE]\n\r\n"
|
| 94 |
|
| 95 |
async def fetch_vertex_claude_response_stream(client, url, headers, payload, model):
|
| 96 |
timestamp = datetime.timestamp(datetime.now())
|
|
|
|
| 137 |
function_full_response = json.dumps(function_call["input"])
|
| 138 |
sse_string = await generate_sse_response(timestamp, model, content=None, tools_id=function_call_id, function_call_name=None, function_call_content=function_full_response)
|
| 139 |
yield sse_string
|
| 140 |
+
yield "data: [DONE]\n\r\n"
|
| 141 |
|
| 142 |
async def fetch_gpt_response_stream(client, url, headers, payload, max_redirects=5):
|
| 143 |
redirect_count = 0
|
|
|
|
| 174 |
line, buffer = buffer.split("\n", 1)
|
| 175 |
# logger.info("line: %s", repr(line))
|
| 176 |
if line and line != "data: " and line != "data:" and not line.startswith(": "):
|
| 177 |
+
yield line.strip() + "\n\r\n"
|
| 178 |
except httpx.RemoteProtocolError as e:
|
| 179 |
yield {"error": f"fetch_gpt_response_stream RemoteProtocolError {e.__class__.__name__}", "details": str(e)}
|
| 180 |
return
|
|
|
|
| 236 |
function_call_content = delta["partial_json"]
|
| 237 |
sse_string = await generate_sse_response(timestamp, model, None, None, None, function_call_content)
|
| 238 |
yield sse_string
|
| 239 |
+
yield "data: [DONE]\n\r\n"
|
| 240 |
|
| 241 |
async def fetch_response(client, url, headers, payload):
|
| 242 |
response = await client.post(url, headers=headers, json=payload)
|