
Adds ai_recipe_generation sample (#2242)

Adding the demo app from my I/O talk. Because AI.

## Pre-launch Checklist

- [x] I read the [Flutter Style Guide] _recently_, and have followed its
advice.
- [x] I signed the [CLA].
- [x] I read the [Contributors Guide].
- [x] I updated/added relevant documentation (doc comments with `///`).
- [x] All existing and new tests are passing.

---------

Co-authored-by: Brett Morgan <brett.morgan@gmail.com>
Author: Eric Windmill
Date: 2024-05-14 11:41:20 -04:00 (committed by GitHub)
Parent: 8575261d37
Commit: be52906894

171 changed files with 8626 additions and 0 deletions


@@ -0,0 +1,58 @@
import 'package:google_generative_ai/google_generative_ai.dart';

import '../features/prompt/prompt_model.dart';

class GeminiService {
  /// Routes the prompt to the text-only or multimodal entry point,
  /// depending on whether any images are attached.
  static Future<GenerateContentResponse> generateContent(
      GenerativeModel model, PromptData prompt) async {
    if (prompt.images.isEmpty) {
      return await GeminiService.generateContentFromText(model, prompt);
    } else {
      return await GeminiService.generateContentFromMultiModal(model, prompt);
    }
  }

  /// Sends the main prompt text, any additional text inputs, and the attached
  /// images as a single multimodal request.
  static Future<GenerateContentResponse> generateContentFromMultiModal(
      GenerativeModel model, PromptData prompt) async {
    final mainText = TextPart(prompt.textInput);
    final additionalTextParts =
        prompt.additionalTextInputs.map((t) => TextPart(t));

    // Read each attached image file and wrap its bytes as an inline JPEG part.
    final imagesParts = <DataPart>[];
    for (var f in prompt.images) {
      final bytes = await f.readAsBytes();
      imagesParts.add(DataPart('image/jpeg', bytes));
    }

    final input = [
      Content.multi([...imagesParts, mainText, ...additionalTextParts])
    ];

    return await model.generateContent(
      input,
      generationConfig: GenerationConfig(
        temperature: 0.4,
        topK: 32,
        topP: 1,
        maxOutputTokens: 4096,
      ),
      safetySettings: [
        SafetySetting(HarmCategory.harassment, HarmBlockThreshold.high),
        SafetySetting(HarmCategory.hateSpeech, HarmBlockThreshold.high),
      ],
    );
  }

  /// Sends a text-only request, joining the main prompt and any additional
  /// text inputs into a single content string.
  static Future<GenerateContentResponse> generateContentFromText(
      GenerativeModel model, PromptData prompt) async {
    final mainText = TextPart(prompt.textInput);
    // Join the raw strings directly; wrapping them in TextPart first and then
    // joining would interpolate TextPart.toString() instead of the text.
    final additionalText = prompt.additionalTextInputs.join("\n");

    return await model.generateContent([
      Content.text(
        '${mainText.text} \n $additionalText',
      )
    ]);
  }
}
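
For context, here is a minimal sketch of how this service might be called from the app. It is not part of the commit: the import paths, the `PromptData` constructor shape (its fields are only read via `textInput`, `additionalTextInputs`, and `images` in this hunk), the model name, and the API-key handling are all assumptions for illustration.

```dart
// Hypothetical caller sketch; paths and PromptData shape are assumptions.
import 'package:google_generative_ai/google_generative_ai.dart';

import 'features/prompt/prompt_model.dart';
import 'services/gemini.dart';

Future<void> main() async {
  // Any multimodal-capable Gemini model name works here; the model the sample
  // actually ships with may differ.
  final model = GenerativeModel(
    model: 'gemini-1.5-flash',
    apiKey: const String.fromEnvironment('GEMINI_API_KEY'),
  );

  // Assumed PromptData construction; field names mirror the getters the
  // service reads.
  final prompt = PromptData(
    textInput: 'Suggest a recipe I can make with these ingredients.',
    additionalTextInputs: ['Keep it vegetarian.'],
    images: [], // no images, so the text-only path is taken
  );

  final response = await GeminiService.generateContent(model, prompt);
  print(response.text);
}
```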