Skip to content

Commit 2ca14a3

Browse files
authored
fix: replace subscripting in OpenAI responses (#50)
1 parent 3b5437d commit 2ca14a3

File tree

2 files changed

+64
-2
lines changed

2 files changed

+64
-2
lines changed

.grit/patterns/python/openai.md

Lines changed: 62 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -198,6 +198,17 @@ pattern pytest_patch() {
198198
},
199199
}
200200
201+
202+
203+
// When there is a variable used by an openai call, make sure it isn't subscripted
204+
pattern fix_downstream_openai_usage() {
205+
$var where {
206+
$program <: maybe contains bubble($var) `$x['$y']` as $sub => `$x.$y` where {
207+
$sub <: contains $var
208+
}
209+
}
210+
}
211+
201212
pattern openai_main($client, $azure) {
202213
$body where {
203214
if ($client <: undefined) {
@@ -257,6 +268,9 @@ pattern openai_main($client, $azure) {
257268
contains `import openai` as $import_stmt where {
258269
$body <: contains bubble($has_sync, $has_async, $has_openai_import, $body, $client, $azure) `openai.$res.$func($params)` as $stmt where {
259270
$res <: rewrite_whole_fn_call(import = $has_openai_import, $has_sync, $has_async, $res, $func, $params, $stmt, $body, $client, $azure),
271+
$stmt <: maybe within bubble($stmt) `$var = $stmt` where {
272+
$var <: fix_downstream_openai_usage()
273+
}
260274
},
261275
},
262276
contains `from openai import $resources` as $partial_import_stmt where {
@@ -562,3 +576,51 @@ response = client.chat.completions.create(
562576
]
563577
)
564578
```
579+
580+
## Fix subscripting
581+
582+
The new API returns response objects rather than dictionaries, so its outputs do not support dictionary-style subscripting (e.g. `completion['choices']`); fields must be accessed as attributes instead (e.g. `completion.choices`).
583+
584+
```python
585+
import openai
586+
587+
model, token_limit, prompt_cost, comp_cost = 'gpt-4-32k', 32_768, 0.06, 0.12
588+
589+
completion = openai.ChatCompletion.create(
590+
model=model,
591+
messages=[
592+
{"role": "system", "content": system},
593+
{"role": "user", "content":
594+
user + text},
595+
]
596+
)
597+
output = completion['choices'][0]['message']['content']
598+
599+
prom = completion['usage']['prompt_tokens']
600+
comp = completion['usage']['completion_tokens']
601+
602+
# unrelated variable
603+
foo = something['else']
604+
```
605+
606+
```python
607+
from openai import OpenAI
608+
609+
client = OpenAI()
610+
611+
model, token_limit, prompt_cost, comp_cost = 'gpt-4-32k', 32_768, 0.06, 0.12
612+
613+
completion = client.chat.completions.create(model=model,
614+
messages=[
615+
{"role": "system", "content": system},
616+
{"role": "user", "content":
617+
user + text},
618+
])
619+
output = completion.choices[0].message.content
620+
621+
prom = completion.usage.prompt_tokens
622+
comp = completion.usage.completion_tokens
623+
624+
# unrelated variable
625+
foo = something['else']
626+
```

.grit/patterns/python/openai_azure.md

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -64,7 +64,7 @@ response = client.chat.completions.create(
6464
]
6565
)
6666

67-
print(response['choices'][0]['message']['content'])
67+
print(response.choices[0].message.content)
6868
```
6969

7070
## Embeddings
@@ -99,7 +99,7 @@ response = client.embeddings.create(
9999
input="Your text string goes here",
100100
model="YOUR_DEPLOYMENT_NAME"
101101
)
102-
embeddings = response['data'][0]['embedding']
102+
embeddings = response.data[0].embedding
103103
print(embeddings)
104104
```
105105

0 commit comments

Comments (0)