When you call the Gemini API from your app with the Vertex AI in Firebase SDK, you can prompt a Gemini model to generate text from multimodal input. A multimodal prompt can mix several modalities (input types), such as text together with images, PDFs, plain-text files, video, and audio.
Optionally, you can try the alternative "Google AI" version of the Gemini API: use Google AI Studio and the Google AI client SDKs to get access at no cost (within limits and where available). Use those SDKs only for prototyping in mobile and web apps.
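For a quick comparison, here is a minimal sketch of an equivalent call against the Google AI version of the API. It assumes the @google/generative-ai JavaScript package and an API key created in Google AI Studio; the GEMINI_API_KEY variable and the base64Image value are illustrative placeholders, not part of the Firebase examples below.

import { GoogleGenerativeAI } from "@google/generative-ai";

// Assumption: GEMINI_API_KEY holds an API key created in Google AI Studio
const genAI = new GoogleGenerativeAI(process.env.GEMINI_API_KEY);
const model = genAI.getGenerativeModel({ model: "gemini-2.0-flash" });

async function run() {
  // Inline data parts use the same shape as in the Vertex AI web examples below;
  // base64Image is a placeholder for your base64-encoded image data
  const imagePart = { inlineData: { data: base64Image, mimeType: "image/jpeg" } };
  const result = await model.generateContent(["What's in this picture?", imagePart]);
  console.log(result.response.text());
}

run();

The examples that follow show how to generate text from a prompt that includes text and a single image; the same task is shown in Swift, Kotlin, Java, Web (JavaScript), and Dart.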
import FirebaseVertexAI
import UIKit  // needed for UIImage

// Initialize the Vertex AI service
let vertex = VertexAI.vertexAI()

// Create a `GenerativeModel` instance with a model that supports your use case
let model = vertex.generativeModel(modelName: "gemini-2.0-flash")

guard let image = UIImage(systemName: "bicycle") else { fatalError() }

// Provide a text prompt to include with the image
let prompt = "What's in this picture?"

// To generate text output, call generateContent and pass in the prompt
let response = try await model.generateContent(image, prompt)
print(response.text ?? "No text in response.")
// Initialize the Vertex AI service and create a `GenerativeModel` instance
// Specify a model that supports your use case
val generativeModel = Firebase.vertexAI.generativeModel("gemini-2.0-flash")

// Loads an image from the app/res/drawable/ directory
val bitmap: Bitmap = BitmapFactory.decodeResource(resources, R.drawable.sparky)

// Provide a prompt that includes the image specified above and text
val prompt = content {
  image(bitmap)
  text("What developer tool is this mascot from?")
}

// To generate text output, call generateContent with the prompt
val response = generativeModel.generateContent(prompt)
print(response.text)
// Initialize the Vertex AI service and create a `GenerativeModel` instance
// Specify a model that supports your use case
GenerativeModel gm = FirebaseVertexAI.getInstance().generativeModel("gemini-2.0-flash");
GenerativeModelFutures model = GenerativeModelFutures.from(gm);

Bitmap bitmap = BitmapFactory.decodeResource(getResources(), R.drawable.sparky);

// Provide a prompt that includes the image specified above and text
Content content = new Content.Builder()
    .addImage(bitmap)
    .addText("What developer tool is this mascot from?")
    .build();

// To generate text output, call generateContent with the prompt
ListenableFuture<GenerateContentResponse> response = model.generateContent(content);
Futures.addCallback(response, new FutureCallback<GenerateContentResponse>() {
  @Override
  public void onSuccess(GenerateContentResponse result) {
    String resultText = result.getText();
    System.out.println(resultText);
  }

  @Override
  public void onFailure(Throwable t) {
    t.printStackTrace();
  }
}, executor);
import { initializeApp } from "firebase/app";
import { getVertexAI, getGenerativeModel } from "firebase/vertexai";

// TODO(developer) Replace the following with your app's Firebase configuration
// See: https://firebase.google.com/docs/web/learn-more#config-object
const firebaseConfig = {
  // ...
};

// Initialize FirebaseApp
const firebaseApp = initializeApp(firebaseConfig);

// Initialize the Vertex AI service
const vertexAI = getVertexAI(firebaseApp);

// Create a `GenerativeModel` instance with a model that supports your use case
const model = getGenerativeModel(vertexAI, { model: "gemini-2.0-flash" });

// Converts a File object to a Part object.
async function fileToGenerativePart(file) {
  const base64EncodedDataPromise = new Promise((resolve) => {
    const reader = new FileReader();
    reader.onloadend = () => resolve(reader.result.split(',')[1]);
    reader.readAsDataURL(file);
  });
  return {
    inlineData: { data: await base64EncodedDataPromise, mimeType: file.type },
  };
}

async function run() {
  // Provide a text prompt to include with the image
  const prompt = "What's in this picture?";

  const fileInputEl = document.querySelector("input[type=file]");
  const imagePart = await fileToGenerativePart(fileInputEl.files[0]);

  // To generate text output, call generateContent with the text and image
  const result = await model.generateContent([prompt, imagePart]);

  const response = result.response;
  const text = response.text();
  console.log(text);
}

run();
import 'dart:io';  // needed for File

import 'package:firebase_vertexai/firebase_vertexai.dart';
import 'package:firebase_core/firebase_core.dart';
import 'firebase_options.dart';

await Firebase.initializeApp(
  options: DefaultFirebaseOptions.currentPlatform,
);

// Initialize the Vertex AI service and create a `GenerativeModel` instance
// Specify a model that supports your use case
final model =
    FirebaseVertexAI.instance.generativeModel(model: 'gemini-2.0-flash');

// Provide a text prompt to include with the image
final prompt = TextPart("What's in the picture?");
// Prepare images for input
final image = await File('image0.jpg').readAsBytes();
final imagePart = InlineDataPart('image/jpeg', image);

// To generate text output, call generateContent with the text and image
final response = await model.generateContent([
  Content.multi([prompt, imagePart])
]);
print(response.text);
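The next set of examples generates text from a prompt that includes text and multiple images.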
import FirebaseVertexAI
import UIKit  // needed for UIImage

// Initialize the Vertex AI service
let vertex = VertexAI.vertexAI()

// Create a `GenerativeModel` instance with a model that supports your use case
let model = vertex.generativeModel(modelName: "gemini-2.0-flash")

guard let image1 = UIImage(systemName: "car") else { fatalError() }
guard let image2 = UIImage(systemName: "car.2") else { fatalError() }

// Provide a text prompt to include with the images
let prompt = "What's different between these pictures?"

// To generate text output, call generateContent and pass in the prompt
let response = try await model.generateContent(image1, image2, prompt)
print(response.text ?? "No text in response.")
// Initialize the Vertex AI service and create a `GenerativeModel` instance
// Specify a model that supports your use case
val generativeModel = Firebase.vertexAI.generativeModel("gemini-2.0-flash")

// Loads an image from the app/res/drawable/ directory
val bitmap1: Bitmap = BitmapFactory.decodeResource(resources, R.drawable.sparky)
val bitmap2: Bitmap = BitmapFactory.decodeResource(resources, R.drawable.sparky_eats_pizza)

// Provide a prompt that includes the images specified above and text
val prompt = content {
  image(bitmap1)
  image(bitmap2)
  text("What is different between these pictures?")
}

// To generate text output, call generateContent with the prompt
val response = generativeModel.generateContent(prompt)
print(response.text)
// Initialize the Vertex AI service and create a `GenerativeModel` instance
// Specify a model that supports your use case
GenerativeModel gm = FirebaseVertexAI.getInstance().generativeModel("gemini-2.0-flash");
GenerativeModelFutures model = GenerativeModelFutures.from(gm);

Bitmap bitmap1 = BitmapFactory.decodeResource(getResources(), R.drawable.sparky);
Bitmap bitmap2 = BitmapFactory.decodeResource(getResources(), R.drawable.sparky_eats_pizza);

// Provide a prompt that includes the images specified above and text
Content prompt = new Content.Builder()
    .addImage(bitmap1)
    .addImage(bitmap2)
    .addText("What's different between these pictures?")
    .build();

// To generate text output, call generateContent with the prompt
ListenableFuture<GenerateContentResponse> response = model.generateContent(prompt);
Futures.addCallback(response, new FutureCallback<GenerateContentResponse>() {
  @Override
  public void onSuccess(GenerateContentResponse result) {
    String resultText = result.getText();
    System.out.println(resultText);
  }

  @Override
  public void onFailure(Throwable t) {
    t.printStackTrace();
  }
}, executor);
import { initializeApp } from "firebase/app";
import { getVertexAI, getGenerativeModel } from "firebase/vertexai";

// TODO(developer) Replace the following with your app's Firebase configuration
// See: https://firebase.google.com/docs/web/learn-more#config-object
const firebaseConfig = {
  // ...
};

// Initialize FirebaseApp
const firebaseApp = initializeApp(firebaseConfig);

// Initialize the Vertex AI service
const vertexAI = getVertexAI(firebaseApp);

// Create a `GenerativeModel` instance with a model that supports your use case
const model = getGenerativeModel(vertexAI, { model: "gemini-2.0-flash" });

// Converts a File object to a Part object.
async function fileToGenerativePart(file) {
  const base64EncodedDataPromise = new Promise((resolve) => {
    const reader = new FileReader();
    reader.onloadend = () => resolve(reader.result.split(',')[1]);
    reader.readAsDataURL(file);
  });
  return {
    inlineData: { data: await base64EncodedDataPromise, mimeType: file.type },
  };
}

async function run() {
  // Provide a text prompt to include with the images
  const prompt = "What's different between these pictures?";

  // Prepare images for input
  const fileInputEl = document.querySelector("input[type=file]");
  const imageParts = await Promise.all(
    [...fileInputEl.files].map(fileToGenerativePart)
  );

  // To generate text output, call generateContent with the text and images
  const result = await model.generateContent([prompt, ...imageParts]);

  const response = result.response;
  const text = response.text();
  console.log(text);
}

run();
import 'dart:io';  // needed for File

import 'package:firebase_vertexai/firebase_vertexai.dart';
import 'package:firebase_core/firebase_core.dart';
import 'firebase_options.dart';

await Firebase.initializeApp(
  options: DefaultFirebaseOptions.currentPlatform,
);

// Initialize the Vertex AI service and create a `GenerativeModel` instance
// Specify a model that supports your use case
final model =
    FirebaseVertexAI.instance.generativeModel(model: 'gemini-2.0-flash');

final (firstImage, secondImage) = await (
  File('image0.jpg').readAsBytes(),
  File('image1.jpg').readAsBytes()
).wait;

// Provide a text prompt to include with the images
final prompt = TextPart("What's different between these pictures?");
// Prepare images for input
final imageParts = [
  InlineDataPart('image/jpeg', firstImage),
  InlineDataPart('image/jpeg', secondImage),
];

// To generate text output, call generateContent with the text and images
final response = await model.generateContent([
  Content.multi([prompt, ...imageParts])
]);
print(response.text);
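The next set of examples generates text from a prompt that includes text and a video.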
import FirebaseVertexAI

// Initialize the Vertex AI service
let vertex = VertexAI.vertexAI()

// Create a `GenerativeModel` instance with a model that supports your use case
let model = vertex.generativeModel(modelName: "gemini-2.0-flash")

// Provide the video as `Data` with the appropriate MIME type.
let video = InlineDataPart(data: try Data(contentsOf: videoURL), mimeType: "video/mp4")

// Provide a text prompt to include with the video
let prompt = "What is in the video?"

// To generate text output, call generateContent with the text and video
let response = try await model.generateContent(video, prompt)
print(response.text ?? "No text in response.")
// Initialize the Vertex AI service and create a `GenerativeModel` instance
// Specify a model that supports your use case
val generativeModel = Firebase.vertexAI.generativeModel("gemini-2.0-flash")

val contentResolver = applicationContext.contentResolver
contentResolver.openInputStream(videoUri).use { stream ->
  stream?.let {
    val bytes = stream.readBytes()

    // Provide a prompt that includes the video specified above and text
    val prompt = content {
      inlineData(bytes, "video/mp4")
      text("What is in the video?")
    }

    // To generate text output, call generateContent with the prompt
    val response = generativeModel.generateContent(prompt)
    Log.d(TAG, response.text ?: "")
  }
}
// Initialize the Vertex AI service and create a `GenerativeModel` instance
// Specify a model that supports your use case
GenerativeModel gm = FirebaseVertexAI.getInstance().generativeModel("gemini-2.0-flash");
GenerativeModelFutures model = GenerativeModelFutures.from(gm);

ContentResolver resolver = getApplicationContext().getContentResolver();
try (InputStream stream = resolver.openInputStream(videoUri)) {
  File videoFile = new File(new URI(videoUri.toString()));
  int videoSize = (int) videoFile.length();
  byte[] videoBytes = new byte[videoSize];
  if (stream != null) {
    stream.read(videoBytes, 0, videoBytes.length);
    stream.close();

    // Provide a prompt that includes the video specified above and text
    Content prompt = new Content.Builder()
        .addInlineData(videoBytes, "video/mp4")
        .addText("What is in the video?")
        .build();

    // To generate text output, call generateContent with the prompt
    ListenableFuture<GenerateContentResponse> response = model.generateContent(prompt);
    Futures.addCallback(response, new FutureCallback<GenerateContentResponse>() {
      @Override
      public void onSuccess(GenerateContentResponse result) {
        String resultText = result.getText();
        System.out.println(resultText);
      }

      @Override
      public void onFailure(Throwable t) {
        t.printStackTrace();
      }
    }, executor);
  }
} catch (IOException e) {
  e.printStackTrace();
} catch (URISyntaxException e) {
  e.printStackTrace();
}
import { initializeApp } from "firebase/app";
import { getVertexAI, getGenerativeModel } from "firebase/vertexai";

// TODO(developer) Replace the following with your app's Firebase configuration
// See: https://firebase.google.com/docs/web/learn-more#config-object
const firebaseConfig = {
  // ...
};

// Initialize FirebaseApp
const firebaseApp = initializeApp(firebaseConfig);

// Initialize the Vertex AI service
const vertexAI = getVertexAI(firebaseApp);

// Create a `GenerativeModel` instance with a model that supports your use case
const model = getGenerativeModel(vertexAI, { model: "gemini-2.0-flash" });

// Converts a File object to a Part object.
async function fileToGenerativePart(file) {
  const base64EncodedDataPromise = new Promise((resolve) => {
    const reader = new FileReader();
    reader.onloadend = () => resolve(reader.result.split(',')[1]);
    reader.readAsDataURL(file);
  });
  return {
    inlineData: { data: await base64EncodedDataPromise, mimeType: file.type },
  };
}

async function run() {
  // Provide a text prompt to include with the video
  const prompt = "What do you see?";

  const fileInputEl = document.querySelector("input[type=file]");
  const videoPart = await fileToGenerativePart(fileInputEl.files[0]);

  // To generate text output, call generateContent with the text and video
  const result = await model.generateContent([prompt, videoPart]);

  const response = result.response;
  const text = response.text();
  console.log(text);
}

run();
import 'dart:io';  // needed for File

import 'package:firebase_vertexai/firebase_vertexai.dart';
import 'package:firebase_core/firebase_core.dart';
import 'firebase_options.dart';

await Firebase.initializeApp(
  options: DefaultFirebaseOptions.currentPlatform,
);

// Initialize the Vertex AI service and create a `GenerativeModel` instance
// Specify a model that supports your use case
final model =
    FirebaseVertexAI.instance.generativeModel(model: 'gemini-2.0-flash');

// Provide a text prompt to include with the video
final prompt = TextPart("What's in the video?");

// Prepare video for input
final video = await File('video0.mp4').readAsBytes();

// Provide the video as `Data` with the appropriate mimetype
final videoPart = InlineDataPart('video/mp4', video);

// To generate text output, call generateContent with the text and video
final response = await model.generateContent([
  Content.multi([prompt, videoPart])
]);
print(response.text);
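Instead of waiting for the model's full response, you can stream partial results as they are generated by calling generateContentStream. The examples below repeat the single-image task with streaming output.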
import FirebaseVertexAI
import UIKit  // needed for UIImage

// Initialize the Vertex AI service
let vertex = VertexAI.vertexAI()

// Create a `GenerativeModel` instance with a model that supports your use case
let model = vertex.generativeModel(modelName: "gemini-2.0-flash")

guard let image = UIImage(systemName: "bicycle") else { fatalError() }

// Provide a text prompt to include with the image
let prompt = "What's in this picture?"

// To stream generated text output, call generateContentStream and pass in the prompt
let contentStream = try model.generateContentStream(image, prompt)
for try await chunk in contentStream {
  if let text = chunk.text {
    print(text)
  }
}
// Initialize the Vertex AI service and create a `GenerativeModel` instance
// Specify a model that supports your use case
val generativeModel = Firebase.vertexAI.generativeModel("gemini-2.0-flash")

// Loads an image from the app/res/drawable/ directory
val bitmap: Bitmap = BitmapFactory.decodeResource(resources, R.drawable.sparky)

// Provide a prompt that includes the image specified above and text
val prompt = content {
  image(bitmap)
  text("What developer tool is this mascot from?")
}

// To stream generated text output, call generateContentStream with the prompt
var fullResponse = ""
generativeModel.generateContentStream(prompt).collect { chunk ->
  print(chunk.text)
  fullResponse += chunk.text
}
// Initialize the Vertex AI service and create a `GenerativeModel` instance
// Specify a model that supports your use case
GenerativeModel gm = FirebaseVertexAI.getInstance().generativeModel("gemini-2.0-flash");
GenerativeModelFutures model = GenerativeModelFutures.from(gm);

Bitmap bitmap = BitmapFactory.decodeResource(getResources(), R.drawable.sparky);

// Provide a prompt that includes the image specified above and text
Content prompt = new Content.Builder()
    .addImage(bitmap)
    .addText("What developer tool is this mascot from?")
    .build();

// To stream generated text output, call generateContentStream with the prompt
Publisher<GenerateContentResponse> streamingResponse = model.generateContentStream(prompt);

final String[] fullResponse = {""};

streamingResponse.subscribe(new Subscriber<GenerateContentResponse>() {
  @Override
  public void onNext(GenerateContentResponse generateContentResponse) {
    String chunk = generateContentResponse.getText();
    fullResponse[0] += chunk;
  }

  @Override
  public void onComplete() {
    System.out.println(fullResponse[0]);
  }

  @Override
  public void onError(Throwable t) {
    t.printStackTrace();
  }

  @Override
  public void onSubscribe(Subscription s) {
    // Request all chunks; Reactive Streams publishers deliver nothing until requested
    s.request(Long.MAX_VALUE);
  }
});
import { initializeApp } from "firebase/app";
import { getVertexAI, getGenerativeModel } from "firebase/vertexai";

// TODO(developer) Replace the following with your app's Firebase configuration
// See: https://firebase.google.com/docs/web/learn-more#config-object
const firebaseConfig = {
  // ...
};

// Initialize FirebaseApp
const firebaseApp = initializeApp(firebaseConfig);

// Initialize the Vertex AI service
const vertexAI = getVertexAI(firebaseApp);

// Create a `GenerativeModel` instance with a model that supports your use case
const model = getGenerativeModel(vertexAI, { model: "gemini-2.0-flash" });

// Converts a File object to a Part object.
async function fileToGenerativePart(file) {
  const base64EncodedDataPromise = new Promise((resolve) => {
    const reader = new FileReader();
    reader.onloadend = () => resolve(reader.result.split(',')[1]);
    reader.readAsDataURL(file);
  });
  return {
    inlineData: { data: await base64EncodedDataPromise, mimeType: file.type },
  };
}

async function run() {
  // Provide a text prompt to include with the image
  const prompt = "What do you see?";

  // Prepare image for input
  const fileInputEl = document.querySelector("input[type=file]");
  const imagePart = await fileToGenerativePart(fileInputEl.files[0]);

  // To stream generated text output, call generateContentStream with the text and image
  const result = await model.generateContentStream([prompt, imagePart]);

  for await (const chunk of result.stream) {
    const chunkText = chunk.text();
    console.log(chunkText);
  }
}

run();
import 'dart:io';  // needed for File

import 'package:firebase_vertexai/firebase_vertexai.dart';
import 'package:firebase_core/firebase_core.dart';
import 'firebase_options.dart';

await Firebase.initializeApp(
  options: DefaultFirebaseOptions.currentPlatform,
);

// Initialize the Vertex AI service and create a `GenerativeModel` instance
// Specify a model that supports your use case
final model =
    FirebaseVertexAI.instance.generativeModel(model: 'gemini-2.0-flash');

// Provide a text prompt to include with the image
final prompt = TextPart("What's in the picture?");
// Prepare images for input
final image = await File('image0.jpg').readAsBytes();
final imagePart = InlineDataPart('image/jpeg', image);

// To stream generated text output, call generateContentStream with the text and image
final response = model.generateContentStream([
  Content.multi([prompt, imagePart])
]);
await for (final chunk in response) {
  print(chunk.text);
}
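The next set of streaming examples uses text and multiple images.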
import FirebaseVertexAI
import UIKit  // needed for UIImage

// Initialize the Vertex AI service
let vertex = VertexAI.vertexAI()

// Create a `GenerativeModel` instance with a model that supports your use case
let model = vertex.generativeModel(modelName: "gemini-2.0-flash")

guard let image1 = UIImage(systemName: "car") else { fatalError() }
guard let image2 = UIImage(systemName: "car.2") else { fatalError() }

// Provide a text prompt to include with the images
let prompt = "What's different between these pictures?"

// To stream generated text output, call generateContentStream and pass in the prompt
let contentStream = try model.generateContentStream(image1, image2, prompt)
for try await chunk in contentStream {
  if let text = chunk.text {
    print(text)
  }
}
// Initialize the Vertex AI service and create a `GenerativeModel` instance
// Specify a model that supports your use case
val generativeModel = Firebase.vertexAI.generativeModel("gemini-2.0-flash")

// Loads an image from the app/res/drawable/ directory
val bitmap1: Bitmap = BitmapFactory.decodeResource(resources, R.drawable.sparky)
val bitmap2: Bitmap = BitmapFactory.decodeResource(resources, R.drawable.sparky_eats_pizza)

// Provide a prompt that includes the images specified above and text
val prompt = content {
  image(bitmap1)
  image(bitmap2)
  text("What's different between these pictures?")
}

// To stream generated text output, call generateContentStream with the prompt
var fullResponse = ""
generativeModel.generateContentStream(prompt).collect { chunk ->
  print(chunk.text)
  fullResponse += chunk.text
}
// Initialize the Vertex AI service and create a `GenerativeModel` instance
// Specify a model that supports your use case
GenerativeModel gm = FirebaseVertexAI.getInstance().generativeModel("gemini-2.0-flash");
GenerativeModelFutures model = GenerativeModelFutures.from(gm);

Bitmap bitmap1 = BitmapFactory.decodeResource(getResources(), R.drawable.sparky);
Bitmap bitmap2 = BitmapFactory.decodeResource(getResources(), R.drawable.sparky_eats_pizza);

// Provide a prompt that includes the images specified above and text
Content prompt = new Content.Builder()
    .addImage(bitmap1)
    .addImage(bitmap2)
    .addText("What's different between these pictures?")
    .build();

// To stream generated text output, call generateContentStream with the prompt
Publisher<GenerateContentResponse> streamingResponse = model.generateContentStream(prompt);

final String[] fullResponse = {""};

streamingResponse.subscribe(new Subscriber<GenerateContentResponse>() {
  @Override
  public void onNext(GenerateContentResponse generateContentResponse) {
    String chunk = generateContentResponse.getText();
    fullResponse[0] += chunk;
  }

  @Override
  public void onComplete() {
    System.out.println(fullResponse[0]);
  }

  @Override
  public void onError(Throwable t) {
    t.printStackTrace();
  }

  @Override
  public void onSubscribe(Subscription s) {
    // Request all chunks; Reactive Streams publishers deliver nothing until requested
    s.request(Long.MAX_VALUE);
  }
});
import { initializeApp } from "firebase/app";
import { getVertexAI, getGenerativeModel } from "firebase/vertexai";

// TODO(developer) Replace the following with your app's Firebase configuration
// See: https://firebase.google.com/docs/web/learn-more#config-object
const firebaseConfig = {
  // ...
};

// Initialize FirebaseApp
const firebaseApp = initializeApp(firebaseConfig);

// Initialize the Vertex AI service
const vertexAI = getVertexAI(firebaseApp);

// Create a `GenerativeModel` instance with a model that supports your use case
const model = getGenerativeModel(vertexAI, { model: "gemini-2.0-flash" });

// Converts a File object to a Part object.
async function fileToGenerativePart(file) {
  const base64EncodedDataPromise = new Promise((resolve) => {
    const reader = new FileReader();
    reader.onloadend = () => resolve(reader.result.split(',')[1]);
    reader.readAsDataURL(file);
  });
  return {
    inlineData: { data: await base64EncodedDataPromise, mimeType: file.type },
  };
}

async function run() {
  // Provide a text prompt to include with the images
  const prompt = "What's different between these pictures?";

  const fileInputEl = document.querySelector("input[type=file]");
  const imageParts = await Promise.all(
    [...fileInputEl.files].map(fileToGenerativePart)
  );

  // To stream generated text output, call generateContentStream with the text and images
  const result = await model.generateContentStream([prompt, ...imageParts]);

  for await (const chunk of result.stream) {
    const chunkText = chunk.text();
    console.log(chunkText);
  }
}

run();
import 'dart:io';  // needed for File

import 'package:firebase_vertexai/firebase_vertexai.dart';
import 'package:firebase_core/firebase_core.dart';
import 'firebase_options.dart';

await Firebase.initializeApp(
  options: DefaultFirebaseOptions.currentPlatform,
);

// Initialize the Vertex AI service and create a `GenerativeModel` instance
// Specify a model that supports your use case
final model =
    FirebaseVertexAI.instance.generativeModel(model: 'gemini-2.0-flash');

final (firstImage, secondImage) = await (
  File('image0.jpg').readAsBytes(),
  File('image1.jpg').readAsBytes()
).wait;

// Provide a text prompt to include with the images
final prompt = TextPart("What's different between these pictures?");
// Prepare images for input
final imageParts = [
  InlineDataPart('image/jpeg', firstImage),
  InlineDataPart('image/jpeg', secondImage),
];

// To stream generated text output, call generateContentStream with the text and images
final response = model.generateContentStream([
  Content.multi([prompt, ...imageParts])
]);
await for (final chunk in response) {
  print(chunk.text);
}
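The final set of streaming examples uses text and a video.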
import FirebaseVertexAI

// Initialize the Vertex AI service
let vertex = VertexAI.vertexAI()

// Create a `GenerativeModel` instance with a model that supports your use case
let model = vertex.generativeModel(modelName: "gemini-2.0-flash")

// Provide the video as `Data` with the appropriate MIME type
let video = InlineDataPart(data: try Data(contentsOf: videoURL), mimeType: "video/mp4")

// Provide a text prompt to include with the video
let prompt = "What is in the video?"

// To stream generated text output, call generateContentStream with the text and video
let contentStream = try model.generateContentStream(video, prompt)
for try await chunk in contentStream {
  if let text = chunk.text {
    print(text)
  }
}
// Initialize the Vertex AI service and create a `GenerativeModel` instance
// Specify a model that supports your use case
val generativeModel = Firebase.vertexAI.generativeModel("gemini-2.0-flash")

val contentResolver = applicationContext.contentResolver
contentResolver.openInputStream(videoUri).use { stream ->
  stream?.let {
    val bytes = stream.readBytes()

    // Provide a prompt that includes the video specified above and text
    val prompt = content {
      inlineData(bytes, "video/mp4")
      text("What is in the video?")
    }

    // To stream generated text output, call generateContentStream with the prompt
    var fullResponse = ""
    generativeModel.generateContentStream(prompt).collect { chunk ->
      Log.d(TAG, chunk.text ?: "")
      fullResponse += chunk.text
    }
  }
}
// Initialize the Vertex AI service and create a `GenerativeModel` instance
// Specify a model that supports your use case
GenerativeModel gm = FirebaseVertexAI.getInstance().generativeModel("gemini-2.0-flash");
GenerativeModelFutures model = GenerativeModelFutures.from(gm);

ContentResolver resolver = getApplicationContext().getContentResolver();
try (InputStream stream = resolver.openInputStream(videoUri)) {
  File videoFile = new File(new URI(videoUri.toString()));
  int videoSize = (int) videoFile.length();
  byte[] videoBytes = new byte[videoSize];
  if (stream != null) {
    stream.read(videoBytes, 0, videoBytes.length);
    stream.close();

    // Provide a prompt that includes the video specified above and text
    Content prompt = new Content.Builder()
        .addInlineData(videoBytes, "video/mp4")
        .addText("What is in the video?")
        .build();

    // To stream generated text output, call generateContentStream with the prompt
    Publisher<GenerateContentResponse> streamingResponse = model.generateContentStream(prompt);

    final String[] fullResponse = {""};

    streamingResponse.subscribe(new Subscriber<GenerateContentResponse>() {
      @Override
      public void onNext(GenerateContentResponse generateContentResponse) {
        String chunk = generateContentResponse.getText();
        fullResponse[0] += chunk;
      }

      @Override
      public void onComplete() {
        System.out.println(fullResponse[0]);
      }

      @Override
      public void onError(Throwable t) {
        t.printStackTrace();
      }

      @Override
      public void onSubscribe(Subscription s) {
        // Request all chunks; Reactive Streams publishers deliver nothing until requested
        s.request(Long.MAX_VALUE);
      }
    });
  }
} catch (IOException e) {
  e.printStackTrace();
} catch (URISyntaxException e) {
  e.printStackTrace();
}
import { initializeApp } from "firebase/app";
import { getVertexAI, getGenerativeModel } from "firebase/vertexai";

// TODO(developer) Replace the following with your app's Firebase configuration
// See: https://firebase.google.com/docs/web/learn-more#config-object
const firebaseConfig = {
  // ...
};

// Initialize FirebaseApp
const firebaseApp = initializeApp(firebaseConfig);

// Initialize the Vertex AI service
const vertexAI = getVertexAI(firebaseApp);

// Create a `GenerativeModel` instance with a model that supports your use case
const model = getGenerativeModel(vertexAI, { model: "gemini-2.0-flash" });

// Converts a File object to a Part object.
async function fileToGenerativePart(file) {
  const base64EncodedDataPromise = new Promise((resolve) => {
    const reader = new FileReader();
    reader.onloadend = () => resolve(reader.result.split(',')[1]);
    reader.readAsDataURL(file);
  });
  return {
    inlineData: { data: await base64EncodedDataPromise, mimeType: file.type },
  };
}

async function run() {
  // Provide a text prompt to include with the video
  const prompt = "What do you see?";

  const fileInputEl = document.querySelector("input[type=file]");
  const videoPart = await fileToGenerativePart(fileInputEl.files[0]);

  // To stream generated text output, call generateContentStream with the text and video
  const result = await model.generateContentStream([prompt, videoPart]);

  for await (const chunk of result.stream) {
    const chunkText = chunk.text();
    console.log(chunkText);
  }
}

run();
import 'dart:io';  // needed for File

import 'package:firebase_vertexai/firebase_vertexai.dart';
import 'package:firebase_core/firebase_core.dart';
import 'firebase_options.dart';

await Firebase.initializeApp(
  options: DefaultFirebaseOptions.currentPlatform,
);

// Initialize the Vertex AI service and create a `GenerativeModel` instance
// Specify a model that supports your use case
final model =
    FirebaseVertexAI.instance.generativeModel(model: 'gemini-2.0-flash');

// Provide a text prompt to include with the video
final prompt = TextPart("What's in the video?");

// Prepare video for input
final video = await File('video0.mp4').readAsBytes();

// Provide the video as `Data` with the appropriate mimetype
final videoPart = InlineDataPart('video/mp4', video);

// To stream generated text output, call generateContentStream with the text and video
final response = model.generateContentStream([
  Content.multi([prompt, videoPart])
]);
await for (final chunk in response) {
  print(chunk.text);
}