Google Gemini
1. Overview
Google's multimodal AI model is designed to process various types of data, including text, images, audio, video, and code.
Available model list:
gemini-1.5-pro, gemini-1.5-pro-001, gemini-1.5-pro-002, gemini-1.5-flash-001, gemini-1.5-flash-002, gemini-1.5-flash-8b, gemini-1.5-pro-latest, gemini-exp-1114, gemini-exp-1121, gemini-exp-1206, gemini-2.0-flash-exp, gemini-2.0-flash-thinking-exp-1219, gemini-2.0-flash-thinking-exp-01-21, gemini-2.0-flash, gemini-2.0-flash-lite-preview-02-05, gemini-2.0-pro-exp-02-05
2. Request Description
Request method:
POST
Request address:
https://gateway.theturbo.ai/v1/chat/completions
3. Input Parameters
3.1 Header Parameters
Content-Type
string
Yes
Set the request header type, which must be application/json
application/json
Accept
string
Yes
Set the response type, which is recommended to be unified as application/json
application/json
Authorization
string
Yes
API_KEY required for authentication. Format: Bearer $YOUR_API_KEY
Bearer $YOUR_API_KEY
3.2 Body Parameters (application/json)
model
string
Yes
The model ID to use. See available models listed in the Overview for details, such as gemini-1.5-pro.
gemini-1.5-pro
messages
array
Yes
Chat message list, compatible with OpenAI interface format. Each object in the array contains role and content.
[{"role": "user","content": "hello"}]
role
string
No
Message role. Optional values: system, user, assistant.
user
content
string
No
The specific content of the message.
Hello, please tell me a joke.
temperature
number
No
Sampling temperature, taking a value between 0 and 2. The larger the value, the more random the output; the smaller the value, the more concentrated and certain the output.
0.7
top_p
number
No
Another way to adjust the sampling distribution, taking a value between 0 and 1. It is usually set as an alternative to the temperature.
0.9
n
number
No
How many replies to generate for each input message.
1
stream
boolean
No
Whether to enable streaming output. When set to true, returns streaming data similar to ChatGPT.
false
stop
string
No
Up to 4 strings can be specified. Once one of these strings appears in the generated content, it stops generating more tokens.
"\n"
max_tokens
number
No
The maximum number of tokens that can be generated in a single reply, subject to the model context length limit.
1024
presence_penalty
number
No
-2.0 ~ 2.0. A positive value encourages the model to output more new topics, while a negative value reduces the probability of outputting new topics.
0
frequency_penalty
number
No
-2.0 ~ 2.0. A positive value reduces the frequency of repeated phrases in the model, while a negative value increases the probability of repeated phrases.
0
4. Request Example
4.1 Chat conversation
POST /v1/chat/completions
Content-Type: application/json
Accept: application/json
Authorization: Bearer $YOUR_API_KEY
{
	"model": "gemini-1.5-pro",
	"messages": [
		{
			"role": "user",
			"content": "Hello, can you explain quantum mechanics to me?"
		}
	],
	"temperature": 0.7,
	"max_tokens": 1024
}
curl https://gateway.theturbo.ai/v1/chat/completions \
	-H "Content-Type: application/json" \
	-H "Accept: application/json" \
	-H "Authorization: Bearer $YOUR_API_KEY" \
	-d "{
	\"model\": \"gemini-1.5-pro\",
	\"messages\": [{
		\"role\": \"user\",
		\"content\": \"Hello, can you explain quantum mechanics to me?\"
	}]
}"
package main
import (
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"
)
const (
	// YOUR_API_KEY is a placeholder credential; replace it with a real
	// gateway API key before running.
	YOUR_API_KEY    = "sk-123456789012345678901234567890123456789012345678"
	// REQUEST_PAYLOAD is the literal JSON body for a single-turn,
	// non-streaming chat completion request against gemini-1.5-pro.
	REQUEST_PAYLOAD = `{
	"model": "gemini-1.5-pro",
	"messages": [{
		"role": "user",
		"content": "Hello, can you explain quantum mechanics to me?"
	}],
	"temperature": 0.7,
	"max_tokens": 1024
}`
)
func main() {
	requestURL := "https://gateway.theturbo.ai/v1/chat/completions"
	requestMethod := "POST"
	requestPayload := strings.NewReader(REQUEST_PAYLOAD)
	req, err := http.NewRequest(requestMethod, requestURL, requestPayload)
	if err != nil {
		fmt.Println("Create request failed, err: ", err)
		return
	}
	req.Header.Add("Content-Type", "application/json")
	req.Header.Add("Accept", "application/json")
	req.Header.Add("Authorization", "Bearer "+YOUR_API_KEY)
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		fmt.Println("Do request failed, err: ", err)
		return
	}
	defer resp.Body.Close()
	respBodyBytes, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		fmt.Println("Read response body failed, err: ", err)
		return
	}
	fmt.Println(string(respBodyBytes))
}4.2 Media file recognition
POST /v1/chat/completions
Content-Type: application/json
Accept: application/json
Authorization: Bearer $YOUR_API_KEY
{
	"model": "gemini-1.5-pro",
	"messages": [
		{
			"role": "user",
			"content": [
				{
					"type": "text",
					"text": "What's in this picture?"
				},
				{
					"type": "image_url",
					"image_url": {
						"url": "data:image/jpeg;base64,${base64_image}"
					}
				}
			]
		}
	],
	"temperature": 0.7,
	"max_tokens": 1024
}
base64_image=$(base64 -i "/path/to/your/image.jpeg");
curl https://gateway.theturbo.ai/v1/chat/completions \
	-H "Content-Type: application/json" \
	-H "Accept: application/json" \
	-H "Authorization: Bearer $YOUR_API_KEY" \
	-d "{
	\"model\": \"gemini-1.5-pro\",
	\"messages\": [{
		\"role\": \"user\",
		\"content\": [{
				\"type\": \"text\",
				\"text\": \"What's in this picture?\"
			},
			{
				\"type\": \"image_url\",
				\"image_url\": {
					\"url\": \"data:image/jpeg;base64,${base64_image}\"
				}
			}
		]
	}]
}"
package main
import (
	"encoding/base64"
	"fmt"
	"io/ioutil"
	"net/http"
	"os"
	"strings"
)
const (
	// YOUR_API_KEY is a placeholder credential; replace it with a real
	// gateway API key before running.
	YOUR_API_KEY    = "sk-123456789012345678901234567890123456789012345678"
	// FILE_MIME_TYPE fills the "data:%s" slot of the data URL below.
	FILE_MIME_TYPE  = "image/jpeg"
	// FILE_PATH is the local media file whose base64-encoded contents
	// fill the ";base64,%s" slot of the data URL below.
	FILE_PATH       = "/path/to/your/image.jpeg"
	// REQUEST_PAYLOAD is a fmt.Sprintf template for a multimodal chat
	// request: the two %s verbs receive FILE_MIME_TYPE and the file's
	// base64 data, in that order (see main).
	REQUEST_PAYLOAD = `{
	"model": "gemini-1.5-pro",
	"messages": [{
		"role": "user",
		"content": [{
				"type": "text",
				"text": "What's in this picture?"
			},
			{
				"type": "image_url",
				"image_url": {
					"url": "data:%s;base64,%s"
				}
			}
		]
	}],
	"temperature": 0.7,
	"max_tokens": 1024
}`
)
func GetFileDataBase64(filePath string) (string, error) {
	file, err := os.Open(FILE_PATH)
	if err != nil {
		return "", err
	}
	defer file.Close()
	fileData, err := ioutil.ReadAll(file)
	if err != nil {
		return "", err
	}
	return base64.StdEncoding.EncodeToString(fileData), nil
}
func main() {
	requestURL := "https://gateway.theturbo.ai/v1/chat/completions"
	requestMethod := "POST"
	fileBase64String, err := GetFileDataBase64(FILE_PATH)
	if err != nil {
		fmt.Println("Read file failed, err: ", err)
		return
	}
	requestPayload := strings.NewReader(fmt.Sprintf(REQUEST_PAYLOAD,
		FILE_MIME_TYPE,
		fileBase64String,
	))
	req, err := http.NewRequest(requestMethod, requestURL, requestPayload)
	if err != nil {
		fmt.Println("Create request failed, err: ", err)
		return
	}
	req.Header.Add("Content-Type", "application/json")
	req.Header.Add("Accept", "application/json")
	req.Header.Add("Authorization", "Bearer "+YOUR_API_KEY)
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		fmt.Println("Do request failed, err: ", err)
		return
	}
	defer resp.Body.Close()
	respBodyBytes, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		fmt.Println("Read response body failed, err: ", err)
		return
	}
	fmt.Println(string(respBodyBytes))
}4.3 Function invocation
POST /v1/chat/completions
Content-Type: application/json
Accept: application/json
Authorization: Bearer $YOUR_API_KEY
{
	"model": "gemini-1.5-pro",
	"messages": [{
		"role": "user",
		"content": "What's the weather like in Boston today?"
	}],
	"tools": [{
		"type": "function",
		"function": {
			"name": "get_current_weather",
			"description": "Get the current weather in a given location",
			"parameters": {
				"type": "object",
				"properties": {
					"location": {
						"type": "string",
						"description": "The city and state, e.g. San Francisco, CA"
					},
					"unit": {
						"type": "string",
						"enum": ["celsius", "fahrenheit"]
					}
				},
				"required": ["location"]
			}
		}
	}],
	"tool_choice": "auto"
}
curl https://gateway.theturbo.ai/v1/chat/completions \
	-H "Content-Type: application/json" \
	-H "Accept: application/json" \
	-H "Authorization: Bearer $YOUR_API_KEY" \
	-d "{
	\"model\": \"gemini-1.5-pro\",
	\"messages\": [{
		\"role\": \"user\",
		\"content\": \"What's the weather like in Boston today?\"
	}],
	\"tools\": [{
		\"type\": \"function\",
		\"function\": {
			\"name\": \"get_current_weather\",
			\"description\": \"Get the current weather in a given location\",
			\"parameters\": {
				\"type\": \"object\",
				\"properties\": {
					\"location\": {
						\"type\": \"string\",
						\"description\": \"The city and state, e.g. San Francisco, CA\"
					},
					\"unit\": {
						\"type\": \"string\",
						\"enum\": [\"celsius\", \"fahrenheit\"]
					}
				},
				\"required\": [\"location\"]
			}
		}
	}],
	\"tool_choice\": \"auto\"
}"
package main
import (
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"
)
const (
	// YOUR_API_KEY is a placeholder credential; replace it with a real
	// gateway API key before running.
	YOUR_API_KEY    = "sk-123456789012345678901234567890123456789012345678"
	// REQUEST_PAYLOAD is the literal JSON body for a function-calling
	// (tools) chat request: it declares one get_current_weather tool and
	// lets the model decide whether to call it via "tool_choice": "auto".
	REQUEST_PAYLOAD = `{
	"model": "gemini-1.5-pro",
	"messages": [{
		"role": "user",
		"content": "What's the weather like in Boston today?"
	}],
	"tools": [{
		"type": "function",
		"function": {
			"name": "get_current_weather",
			"description": "Get the current weather in a given location",
			"parameters": {
				"type": "object",
				"properties": {
					"location": {
						"type": "string",
						"description": "The city and state, e.g. San Francisco, CA"
					},
					"unit": {
						"type": "string",
						"enum": ["celsius", "fahrenheit"]
					}
				},
				"required": ["location"]
			}
		}
	}],
	"tool_choice": "auto"
}`
)
func main() {
	requestURL := "https://gateway.theturbo.ai/v1/chat/completions"
	requestMethod := "POST"
	requestPayload := strings.NewReader(REQUEST_PAYLOAD)
	req, err := http.NewRequest(requestMethod, requestURL, requestPayload)
	if err != nil {
		fmt.Println("Create request failed, err: ", err)
		return
	}
	req.Header.Add("Content-Type", "application/json")
	req.Header.Add("Accept", "application/json")
	req.Header.Add("Authorization", "Bearer "+YOUR_API_KEY)
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		fmt.Println("Do request failed, err: ", err)
		return
	}
	defer resp.Body.Close()
	respBodyBytes, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		fmt.Println("Read response body failed, err: ", err)
		return
	}
	fmt.Println(string(respBodyBytes))
}5. Response Example
{
	"id": "chatcmpl-1234567890",
	"object": "chat.completion",
	"created": 1699999999,
	"model": "gemini-1.5-pro",
	"choices": [
		{
			"message": {
				"role": "assistant",
				"content": "Quantum mechanics is a branch of physics that studies the microscopic world..."
			},
			"finish_reason": "stop"
		}
	],
	"usage": {
		"prompt_tokens": 10,
		"completion_tokens": 30,
		"total_tokens": 40
	}
}
Last updated