Skip to content

Commit 17027c4

Browse files
maryhippMary Hipppsychedelicious
authored
Maryhipp/chatgpt UI (#7969)
* add GPTimage1 as allowed base model * fix for non-disabled inpaint layers * lots of boilerplate for adding gpt-image base model and disabling things along with imagen * handle gpt-image dimensions * build graph for gpt-image * lint * feat(ui): make chatgpt model naming consistent * feat(ui): graph builder naming * feat(ui): disable img2img for imagen3 * feat(ui): more naming * feat(ui): support presigned url prefetch * feat(ui): disable neg prompt for chatgpt * docs(ui): update docstring * feat(ui): fix graph building issues for chatgpt * fix(ui): node ids for chatgpt/imagen * chore(ui): typegen --------- Co-authored-by: Mary Hipp <maryhipp@Marys-MacBook-Air.local> Co-authored-by: psychedelicious <4822129+psychedelicious@users.noreply.github.com>
1 parent 13d44f4 commit 17027c4

File tree

31 files changed

+282
-443
lines changed

31 files changed

+282
-443
lines changed

invokeai/frontend/web/public/locales/en.json

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1322,7 +1322,7 @@
13221322
"unableToCopyDesc": "Your browser does not support clipboard access. Firefox users may be able to fix this by following ",
13231323
"unableToCopyDesc_theseSteps": "these steps",
13241324
"fluxFillIncompatibleWithT2IAndI2I": "FLUX Fill is not compatible with Text to Image or Image to Image. Use other FLUX models for these tasks.",
1325-
"image3IncompatibleWithInpaintAndOutpaint": "Imagen3 does not support Inpainting or Outpainting. Use other models for these tasks.",
1325+
"imagen3IncompatibleGenerationMode": "Imagen3 only supports Text to Image. Use other models for Image to Image, Inpainting and Outpainting tasks.",
13261326
"problemUnpublishingWorkflow": "Problem Unpublishing Workflow",
13271327
"problemUnpublishingWorkflowDescription": "There was a problem unpublishing the workflow. Please try again.",
13281328
"workflowUnpublished": "Workflow Unpublished"

invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/enqueueRequestedLinear.ts

Lines changed: 7 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@ import { withResult, withResultAsync } from 'common/util/result';
66
import { parseify } from 'common/util/serialize';
77
import { $canvasManager } from 'features/controlLayers/store/ephemeral';
88
import { prepareLinearUIBatch } from 'features/nodes/util/graph/buildLinearBatchConfig';
9+
import { buildChatGPT4oGraph } from 'features/nodes/util/graph/generation/buildChatGPT4oGraph';
910
import { buildCogView4Graph } from 'features/nodes/util/graph/generation/buildCogView4Graph';
1011
import { buildFLUXGraph } from 'features/nodes/util/graph/generation/buildFLUXGraph';
1112
import { buildImagen3Graph } from 'features/nodes/util/graph/generation/buildImagen3Graph';
@@ -51,6 +52,8 @@ export const addEnqueueRequestedLinear = (startAppListening: AppStartListening)
5152
return await buildCogView4Graph(state, manager);
5253
case 'imagen3':
5354
return await buildImagen3Graph(state, manager);
55+
case 'chatgpt-4o':
56+
return await buildChatGPT4oGraph(state, manager);
5457
default:
5558
assert(false, `No graph builders for base ${base}`);
5659
}
@@ -76,15 +79,15 @@ export const addEnqueueRequestedLinear = (startAppListening: AppStartListening)
7679
const destination = state.canvasSettings.sendToCanvas ? 'canvas' : 'gallery';
7780

7881
const prepareBatchResult = withResult(() =>
79-
prepareLinearUIBatch(
82+
prepareLinearUIBatch({
8083
state,
8184
g,
8285
prepend,
8386
seedFieldIdentifier,
8487
positivePromptFieldIdentifier,
85-
'canvas',
86-
destination
87-
)
88+
origin: 'canvas',
89+
destination,
90+
})
8891
);
8992

9093
if (prepareBatchResult.isErr()) {

invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/enqueueRequestedUpscale.ts

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -20,15 +20,15 @@ export const addEnqueueRequestedUpscale = (startAppListening: AppStartListening)
2020

2121
const { g, seedFieldIdentifier, positivePromptFieldIdentifier } = await buildMultidiffusionUpscaleGraph(state);
2222

23-
const batchConfig = prepareLinearUIBatch(
23+
const batchConfig = prepareLinearUIBatch({
2424
state,
2525
g,
2626
prepend,
2727
seedFieldIdentifier,
2828
positivePromptFieldIdentifier,
29-
'upscaling',
30-
'gallery'
31-
);
29+
origin: 'upscaling',
30+
destination: 'gallery',
31+
});
3232

3333
const req = dispatch(queueApi.endpoints.enqueueBatch.initiate(batchConfig, enqueueMutationFixedCacheKeyOptions));
3434
try {

invokeai/frontend/web/src/features/controlLayers/components/CanvasAddEntityButtons.tsx

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,7 @@ export const CanvasAddEntityButtons = memo(() => {
2424
const isReferenceImageEnabled = useIsEntityTypeEnabled('reference_image');
2525
const isRegionalGuidanceEnabled = useIsEntityTypeEnabled('regional_guidance');
2626
const isControlLayerEnabled = useIsEntityTypeEnabled('control_layer');
27+
const isInpaintLayerEnabled = useIsEntityTypeEnabled('inpaint_mask');
2728

2829
return (
2930
<Flex w="full" h="full" justifyContent="center" gap={4}>
@@ -52,6 +53,7 @@ export const CanvasAddEntityButtons = memo(() => {
5253
justifyContent="flex-start"
5354
leftIcon={<PiPlusBold />}
5455
onClick={addInpaintMask}
56+
isDisabled={!isInpaintLayerEnabled}
5557
>
5658
{t('controlLayers.inpaintMask')}
5759
</Button>

invokeai/frontend/web/src/features/controlLayers/components/CanvasEntityList/EntityListGlobalActionBarAddLayerMenu.tsx

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,7 @@ export const EntityListGlobalActionBarAddLayerMenu = memo(() => {
2525
const isReferenceImageEnabled = useIsEntityTypeEnabled('reference_image');
2626
const isRegionalGuidanceEnabled = useIsEntityTypeEnabled('regional_guidance');
2727
const isControlLayerEnabled = useIsEntityTypeEnabled('control_layer');
28+
const isInpaintLayerEnabled = useIsEntityTypeEnabled('inpaint_mask');
2829

2930
return (
3031
<Menu>
@@ -46,7 +47,7 @@ export const EntityListGlobalActionBarAddLayerMenu = memo(() => {
4647
</MenuItem>
4748
</MenuGroup>
4849
<MenuGroup title={t('controlLayers.regional')}>
49-
<MenuItem icon={<PiPlusBold />} onClick={addInpaintMask}>
50+
<MenuItem icon={<PiPlusBold />} onClick={addInpaintMask} isDisabled={!isInpaintLayerEnabled}>
5051
{t('controlLayers.inpaintMask')}
5152
</MenuItem>
5253
<MenuItem icon={<PiPlusBold />} onClick={addRegionalGuidance} isDisabled={!isRegionalGuidanceEnabled}>
Lines changed: 13 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,10 @@
11
import { useAppSelector } from 'app/store/storeHooks';
2-
import { selectIsCogView4, selectIsImagen3, selectIsSD3 } from 'features/controlLayers/store/paramsSlice';
2+
import {
3+
selectIsChatGTP4o,
4+
selectIsCogView4,
5+
selectIsImagen3,
6+
selectIsSD3,
7+
} from 'features/controlLayers/store/paramsSlice';
38
import type { CanvasEntityType } from 'features/controlLayers/store/types';
49
import { useMemo } from 'react';
510
import type { Equals } from 'tsafe';
@@ -9,23 +14,24 @@ export const useIsEntityTypeEnabled = (entityType: CanvasEntityType) => {
914
const isSD3 = useAppSelector(selectIsSD3);
1015
const isCogView4 = useAppSelector(selectIsCogView4);
1116
const isImagen3 = useAppSelector(selectIsImagen3);
17+
const isChatGPT4o = useAppSelector(selectIsChatGTP4o);
1218

1319
const isEntityTypeEnabled = useMemo<boolean>(() => {
1420
switch (entityType) {
1521
case 'reference_image':
16-
return !isSD3 && !isCogView4 && !isImagen3;
22+
return !isSD3 && !isCogView4 && !isImagen3 && !isChatGPT4o;
1723
case 'regional_guidance':
18-
return !isSD3 && !isCogView4 && !isImagen3;
24+
return !isSD3 && !isCogView4 && !isImagen3 && !isChatGPT4o;
1925
case 'control_layer':
20-
return !isSD3 && !isCogView4 && !isImagen3;
26+
return !isSD3 && !isCogView4 && !isImagen3 && !isChatGPT4o;
2127
case 'inpaint_mask':
22-
return !isImagen3;
28+
return !isImagen3 && !isChatGPT4o;
2329
case 'raster_layer':
24-
return !isImagen3;
30+
return !isImagen3 && !isChatGPT4o;
2531
default:
2632
assert<Equals<typeof entityType, never>>(false);
2733
}
28-
}, [entityType, isSD3, isCogView4, isImagen3]);
34+
}, [entityType, isSD3, isCogView4, isImagen3, isChatGPT4o]);
2935

3036
return isEntityTypeEnabled;
3137
};

invokeai/frontend/web/src/features/controlLayers/konva/CanvasObject/CanvasObjectImage.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -112,7 +112,7 @@ export class CanvasObjectImage extends CanvasModuleBase {
112112
return;
113113
}
114114

115-
const imageElementResult = await withResultAsync(() => loadImage(imageDTO.image_url));
115+
const imageElementResult = await withResultAsync(() => loadImage(imageDTO.image_url, true));
116116
if (imageElementResult.isErr()) {
117117
// Image loading failed (e.g. the URL to the "physical" image is invalid)
118118
this.onFailedToLoadImage(t('controlLayers.unableToLoadImage', 'Unable to load image'));

invokeai/frontend/web/src/features/controlLayers/konva/CanvasTool/CanvasBboxToolModule.ts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -235,8 +235,8 @@ export class CanvasBboxToolModule extends CanvasModuleBase {
235235
if (tool !== 'bbox') {
236236
return NO_ANCHORS;
237237
}
238-
if (model?.base === 'imagen3') {
239-
// The bbox is not resizable in imagen3 mode
238+
if (model?.base === 'imagen3' || model?.base === 'chatgpt-4o') {
239+
// The bbox is not resizable in these modes
240240
return NO_ANCHORS;
241241
}
242242
return ALL_ANCHORS;

invokeai/frontend/web/src/features/controlLayers/konva/util.ts

Lines changed: 11 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -476,15 +476,24 @@ export function getImageDataTransparency(imageData: ImageData): Transparency {
476476
/**
477477
* Loads an image from a URL and returns a promise that resolves with the loaded image element.
478478
* @param src The image source URL
479+
* @param fetchUrlFirst Whether to fetch the image's URL first, assuming the provided `src` will redirect to a different URL. This addresses an issue where CORS headers are dropped during a redirect.
479480
* @returns A promise that resolves with the loaded image element
480481
*/
481-
export function loadImage(src: string): Promise<HTMLImageElement> {
482+
export async function loadImage(src: string, fetchUrlFirst?: boolean): Promise<HTMLImageElement> {
483+
const authToken = $authToken.get();
484+
let url = src;
485+
if (authToken && fetchUrlFirst) {
486+
const response = await fetch(`${src}?url_only=true`, { credentials: 'include' });
487+
const data = await response.json();
488+
url = data.url;
489+
}
490+
482491
return new Promise((resolve, reject) => {
483492
const imageElement = new Image();
484493
imageElement.onload = () => resolve(imageElement);
485494
imageElement.onerror = (error) => reject(error);
486495
imageElement.crossOrigin = $authToken.get() ? 'use-credentials' : 'anonymous';
487-
imageElement.src = src;
496+
imageElement.src = url;
488497
});
489498
}
490499

invokeai/frontend/web/src/features/controlLayers/store/canvasSlice.ts

Lines changed: 17 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -67,7 +67,7 @@ import type {
6767
IPMethodV2,
6868
T2IAdapterConfig,
6969
} from './types';
70-
import { getEntityIdentifier, isImagen3AspectRatioID, isRenderableEntity } from './types';
70+
import { getEntityIdentifier, isChatGPT4oAspectRatioID, isImagen3AspectRatioID, isRenderableEntity } from './types';
7171
import {
7272
converters,
7373
getControlLayerState,
@@ -1232,6 +1232,20 @@ export const canvasSlice = createSlice({
12321232
}
12331233
state.bbox.aspectRatio.value = state.bbox.rect.width / state.bbox.rect.height;
12341234
state.bbox.aspectRatio.isLocked = true;
1235+
} else if (state.bbox.modelBase === 'chatgpt-4o' && isChatGPT4oAspectRatioID(id)) {
1236+
// gpt-image has specific output sizes that are not exactly the same as the aspect ratio. Need special handling.
1237+
if (id === '3:2') {
1238+
state.bbox.rect.width = 1536;
1239+
state.bbox.rect.height = 1024;
1240+
} else if (id === '1:1') {
1241+
state.bbox.rect.width = 1024;
1242+
state.bbox.rect.height = 1024;
1243+
} else if (id === '2:3') {
1244+
state.bbox.rect.width = 1024;
1245+
state.bbox.rect.height = 1536;
1246+
}
1247+
state.bbox.aspectRatio.value = state.bbox.rect.width / state.bbox.rect.height;
1248+
state.bbox.aspectRatio.isLocked = true;
12351249
} else {
12361250
state.bbox.aspectRatio.isLocked = true;
12371251
state.bbox.aspectRatio.value = ASPECT_RATIO_MAP[id].ratio;
@@ -1704,7 +1718,7 @@ export const canvasSlice = createSlice({
17041718
const base = model?.base;
17051719
if (isMainModelBase(base) && state.bbox.modelBase !== base) {
17061720
state.bbox.modelBase = base;
1707-
if (base === 'imagen3') {
1721+
if (base === 'imagen3' || base === 'chatgpt-4o') {
17081722
state.bbox.aspectRatio.isLocked = true;
17091723
state.bbox.aspectRatio.value = 1;
17101724
state.bbox.aspectRatio.id = '1:1';
@@ -1843,7 +1857,7 @@ export const canvasPersistConfig: PersistConfig<CanvasState> = {
18431857
};
18441858

18451859
const syncScaledSize = (state: CanvasState) => {
1846-
if (state.bbox.modelBase === 'imagen3') {
1860+
if (state.bbox.modelBase === 'imagen3' || state.bbox.modelBase === 'chatgpt-4o') {
18471861
// Imagen3 and ChatGPT 4o have fixed sizes. Scaled bbox is not supported.
18481862
return;
18491863
}

invokeai/frontend/web/src/features/controlLayers/store/paramsSlice.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -381,6 +381,7 @@ export const selectIsFLUX = createParamsSelector((params) => params.model?.base
381381
export const selectIsSD3 = createParamsSelector((params) => params.model?.base === 'sd-3');
382382
export const selectIsCogView4 = createParamsSelector((params) => params.model?.base === 'cogview4');
383383
export const selectIsImagen3 = createParamsSelector((params) => params.model?.base === 'imagen3');
384+
export const selectIsChatGTP4o = createParamsSelector((params) => params.model?.base === 'chatgpt-4o');
384385

385386
export const selectModel = createParamsSelector((params) => params.model);
386387
export const selectModelKey = createParamsSelector((params) => params.model?.key);

invokeai/frontend/web/src/features/controlLayers/store/types.ts

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -388,9 +388,15 @@ export type StagingAreaImage = {
388388
};
389389

390390
export const zAspectRatioID = z.enum(['Free', '16:9', '3:2', '4:3', '1:1', '3:4', '2:3', '9:16']);
391+
391392
export const zImagen3AspectRatioID = z.enum(['16:9', '4:3', '1:1', '3:4', '9:16']);
392393
export const isImagen3AspectRatioID = (v: unknown): v is z.infer<typeof zImagen3AspectRatioID> =>
393394
zImagen3AspectRatioID.safeParse(v).success;
395+
396+
export const zChatGPT4oAspectRatioID = z.enum(['3:2', '1:1', '2:3']);
397+
export const isChatGPT4oAspectRatioID = (v: unknown): v is z.infer<typeof zChatGPT4oAspectRatioID> =>
398+
zChatGPT4oAspectRatioID.safeParse(v).success;
399+
394400
export type AspectRatioID = z.infer<typeof zAspectRatioID>;
395401
export const isAspectRatioID = (v: unknown): v is AspectRatioID => zAspectRatioID.safeParse(v).success;
396402

invokeai/frontend/web/src/features/dynamicPrompts/store/dynamicPromptsSlice.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@ import type { PersistConfig, RootState } from 'app/store/store';
44
import { z } from 'zod';
55

66
const zSeedBehaviour = z.enum(['PER_ITERATION', 'PER_PROMPT']);
7-
type SeedBehaviour = z.infer<typeof zSeedBehaviour>;
7+
export type SeedBehaviour = z.infer<typeof zSeedBehaviour>;
88
export const isSeedBehaviour = (v: unknown): v is SeedBehaviour => zSeedBehaviour.safeParse(v).success;
99

1010
export interface DynamicPromptsState {

invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/publish.ts

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -123,6 +123,8 @@ const NODE_TYPE_PUBLISH_DENYLIST = [
123123
'metadata_to_t2i_adapters',
124124
'google_imagen3_generate',
125125
'google_imagen3_edit',
126+
'chatgpt_create_image',
127+
'chatgpt_edit_image',
126128
];
127129

128130
export const selectHasUnpublishableNodes = createSelector(selectNodes, (nodes) => {

invokeai/frontend/web/src/features/nodes/util/graph/buildLinearBatchConfig.ts

Lines changed: 38 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -2,35 +2,57 @@ import { NUMPY_RAND_MAX, NUMPY_RAND_MIN } from 'app/constants';
22
import type { RootState } from 'app/store/store';
33
import { generateSeeds } from 'common/util/generateSeeds';
44
import randomInt from 'common/util/randomInt';
5+
import type { SeedBehaviour } from 'features/dynamicPrompts/store/dynamicPromptsSlice';
6+
import type { ModelIdentifierField } from 'features/nodes/types/common';
57
import type { FieldIdentifier } from 'features/nodes/types/field';
68
import type { Graph } from 'features/nodes/util/graph/generation/Graph';
79
import { range } from 'lodash-es';
810
import type { components } from 'services/api/schema';
911
import type { Batch, EnqueueBatchArg } from 'services/api/types';
12+
import { assert } from 'tsafe';
1013

11-
export const prepareLinearUIBatch = (
12-
state: RootState,
13-
g: Graph,
14-
prepend: boolean,
15-
seedFieldIdentifier: FieldIdentifier,
16-
positivePromptFieldIdentifier: FieldIdentifier,
17-
origin: 'canvas' | 'workflows' | 'upscaling',
18-
destination: 'canvas' | 'gallery'
19-
): EnqueueBatchArg => {
14+
const getExtendedPrompts = (arg: {
15+
seedBehaviour: SeedBehaviour;
16+
iterations: number;
17+
prompts: string[];
18+
model: ModelIdentifierField;
19+
}): string[] => {
20+
const { seedBehaviour, iterations, prompts, model } = arg;
21+
// Normally, the seed behaviour implicitly determines the batch size. But when we use models without seeds (like
22+
// ChatGPT 4o) in conjunction with the per-prompt seed behaviour, we lose out on that implicit batch size. To rectify
23+
// this, we need to create a batch of the right size by repeating the prompts.
24+
if (seedBehaviour === 'PER_PROMPT' || model.base === 'chatgpt-4o') {
25+
return range(iterations).flatMap(() => prompts);
26+
}
27+
return prompts;
28+
};
29+
30+
export const prepareLinearUIBatch = (arg: {
31+
state: RootState;
32+
g: Graph;
33+
prepend: boolean;
34+
seedFieldIdentifier?: FieldIdentifier;
35+
positivePromptFieldIdentifier: FieldIdentifier;
36+
origin: 'canvas' | 'workflows' | 'upscaling';
37+
destination: 'canvas' | 'gallery';
38+
}): EnqueueBatchArg => {
39+
const { state, g, prepend, seedFieldIdentifier, positivePromptFieldIdentifier, origin, destination } = arg;
2040
const { iterations, model, shouldRandomizeSeed, seed, shouldConcatPrompts } = state.params;
2141
const { prompts, seedBehaviour } = state.dynamicPrompts;
2242

43+
assert(model, 'No model found in state when preparing batch');
44+
2345
const data: Batch['data'] = [];
2446
const firstBatchDatumList: components['schemas']['BatchDatum'][] = [];
2547
const secondBatchDatumList: components['schemas']['BatchDatum'][] = [];
2648

2749
// add seeds first to ensure the output order groups the prompts
28-
if (seedBehaviour === 'PER_PROMPT') {
50+
if (seedFieldIdentifier && seedBehaviour === 'PER_PROMPT') {
2951
const seeds = generateSeeds({
3052
count: prompts.length * iterations,
31-
// Imagen3's support for seeded generation is iffy, we are just not going too use in in linear UI generations.
53+
// Imagen3's support for seeded generation is iffy, we are just not going to use it in linear UI generations.
3254
start:
33-
model?.base === 'imagen3' ? randomInt(NUMPY_RAND_MIN, NUMPY_RAND_MAX) : shouldRandomizeSeed ? undefined : seed,
55+
model.base === 'imagen3' ? randomInt(NUMPY_RAND_MIN, NUMPY_RAND_MAX) : shouldRandomizeSeed ? undefined : seed,
3456
});
3557

3658
firstBatchDatumList.push({
@@ -46,13 +68,13 @@ export const prepareLinearUIBatch = (
4668
field_name: 'seed',
4769
items: seeds,
4870
});
49-
} else {
71+
} else if (seedFieldIdentifier && seedBehaviour === 'PER_ITERATION') {
5072
// seedBehaviour = SeedBehaviour.PerRun
5173
const seeds = generateSeeds({
5274
count: iterations,
5375
// Imagen3's support for seeded generation is iffy, we are just not going too use in in linear UI generations.
5476
start:
55-
model?.base === 'imagen3' ? randomInt(NUMPY_RAND_MIN, NUMPY_RAND_MAX) : shouldRandomizeSeed ? undefined : seed,
77+
model.base === 'imagen3' ? randomInt(NUMPY_RAND_MIN, NUMPY_RAND_MAX) : shouldRandomizeSeed ? undefined : seed,
5678
});
5779

5880
secondBatchDatumList.push({
@@ -71,7 +93,7 @@ export const prepareLinearUIBatch = (
7193
data.push(secondBatchDatumList);
7294
}
7395

74-
const extendedPrompts = seedBehaviour === 'PER_PROMPT' ? range(iterations).flatMap(() => prompts) : prompts;
96+
const extendedPrompts = getExtendedPrompts({ seedBehaviour, iterations, prompts, model });
7597

7698
// zipped batch of prompts
7799
firstBatchDatumList.push({
@@ -88,7 +110,7 @@ export const prepareLinearUIBatch = (
88110
items: extendedPrompts,
89111
});
90112

91-
if (shouldConcatPrompts && model?.base === 'sdxl') {
113+
if (shouldConcatPrompts && model.base === 'sdxl') {
92114
firstBatchDatumList.push({
93115
node_path: positivePromptFieldIdentifier.nodeId,
94116
field_name: 'style',

0 commit comments

Comments
 (0)