forked from stackwiseai/stackwise
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path_fixGenerate.ts
More file actions
102 lines (93 loc) · 3.85 KB
/
_fixGenerate.ts
File metadata and controls
102 lines (93 loc) · 3.85 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
import OpenAI from 'openai';
import { BoilerplateMetadata, Message } from '../lib/types';
import { processBoilerplate, readFunctionToString } from '../lib/utils';
import { combineSkeleton } from '../../createSkeleton';
import { openai } from '../openai/construct';
/**
 * Generates the contents of a function file that satisfies the user's brief
 * while preserving the name, parameters, and return type of the supplied
 * skeleton.
 *
 * Two paths exist:
 * - `integration === 'generic'`: prompt GPT-4 directly with the skeleton,
 *   the brief, and (when available) an example boilerplate to learn from.
 * - otherwise: dynamically load `../<integration>/construct` and delegate
 *   generation to that integration-specific module.
 *
 * @param briefSkeleton - skeleton annotated with the brief (logged/combined for debugging).
 * @param functionAndOutputSkeleton - the function + output-type skeleton the model must preserve.
 * @param brief - the user's natural-language description of what the function should do.
 * @param exampleBoilerplate - example metadata to seed generation; `null` when none was found.
 * @param integration - `'generic'` or the name of an integration directory under `../`.
 * @returns the generated file contents.
 * @throws rethrows OpenAI/API errors, integration import errors, or an Error
 *         when the model returns an empty completion.
 */
export default async function generateFunction(
  briefSkeleton: string,
  functionAndOutputSkeleton: string,
  brief: string,
  exampleBoilerplate: null | BoilerplateMetadata | BoilerplateMetadata[],
  integration: string
): Promise<string> {
  console.log(`integration in generateFunction:`, integration);
  console.log(
    `functionAndOutputSkeleton in generateFunction:`,
    functionAndOutputSkeleton
  );
  console.log(`briefSkeleton in generateFunction:`, briefSkeleton);
  console.log(
    `combined in generateFunction:`,
    combineSkeleton(briefSkeleton, functionAndOutputSkeleton)
  );
  if (integration === 'generic') {
    // it's either null, meaning no example boilerplate was found or a string
    const messages: Message[] = [];
    messages.push({
      role: 'system',
      content: `You are an expert at writing functions that match the brief that the user provides. You can change the body of the function to whatever you want as long as you ensure that you keep the return type, function name, and parameters the same as the skeleton.
      Feel free to make assumptions about what the user wants based on their input and output types and brief. If the user brief does not make sense or is not possible given the input and return types, just do your best and leave comments where the user would need to fill in the gaps.`,
    });
    // if example boilerplate not found, that means we just have a generic skeleton to build up on
    if (exampleBoilerplate) {
      const startingBoilerplate = processBoilerplate(exampleBoilerplate);
      messages.push({
        role: 'system',
        content: `Here is an example function that you can learn from:
        ${startingBoilerplate}`,
      });
    }
    try {
      const response = await openai.chat.completions.create({
        model: 'gpt-4',
        // BUG FIX: the accumulated `messages` (expert system prompt +
        // optional example boilerplate) were previously built but never
        // sent to the API; spread them in ahead of the request-specific
        // skeleton and brief messages.
        messages: [
          ...messages,
          {
            role: 'system',
            content: `Here is the boilerplate that you are working with:
            ${functionAndOutputSkeleton}
            Ensure that you keep the return type, function name, and parameters the same. You can change the body of the function to whatever you want.`,
          },
          {
            role: 'user',
            content: `Here is the user brief: ${brief}.
            Respond with just what would go in the function file and nothing else. No explanation or words, just the contents of the file. Make sure that the code is runnable if I were to execute it directly.`,
          },
        ],
        temperature: 0.3,
      });
      // The SDK types `content` as `string | null`; guard so we honor the
      // declared Promise<string> return type instead of leaking null.
      const content = response.choices[0]?.message?.content;
      if (content == null) {
        throw new Error('OpenAI returned an empty completion');
      }
      return content;
    } catch (error) {
      console.error('Error querying OpenAI:', error);
      throw error;
    }
  } else {
    // if non generic it means we have an integration
    try {
      // Dynamically import the module based on the integration variable
      const integrationModule = await import(`../${integration}/construct`);
      let startingBoilerplate;
      if (!exampleBoilerplate) {
        // No example found: fall back to the integration's stock boilerplate
        // file on disk.
        startingBoilerplate = await readFunctionToString(
          `src/stack/integrations/${integration}/boilerplate.ts`
        );
      } else {
        startingBoilerplate = processBoilerplate(exampleBoilerplate);
      }
      // Assuming the imported module has a default function that you need to call
      const result = await integrationModule.default(
        briefSkeleton,
        functionAndOutputSkeleton,
        brief,
        exampleBoilerplate, // can be BoilerplateMetadata | null | BoilerplateMetadata[]
        startingBoilerplate
      );
      return result;
    } catch (error) {
      console.error(
        `Error importing module for integration "${integration}":`,
        error
      );
      throw error;
    }
  }
}