
code-block

plain text

plain text

ruby

def main
  puts "hoge"
end

js

let hoge = "hoge"
console.log(hoge)

ts

const hoge: string = "hoge"
console.warn(hoge)

python

message = 'Hello, Python!'
print(message)

basic

LET VAR = "hoge"

json

{
  "key": "value",
  "num": 1,
  "bool": true
}

python code

# Call the Converse API with a single user prompt and return the response text together with latency and token-count metrics.
def bedrock_converse(client, id, prompt, max_tokens=300, temperature=0, top_p=0.9):
    response = ""
    response = client.converse(
        modelId=id,
        messages=[
            {
                "role": "user",
                "content": [
                    {
                        "text": prompt
                    }
                ],
            }
        ],
        inferenceConfig={
            "temperature": temperature,
            "maxTokens": max_tokens,
            "topP": top_p
        }
        #additionalModelRequestFields={
        #}
    )
    result = response['output']['message']['content'][0]['text'] \
    + '\n--- Latency: ' + str(response['metrics']['latencyMs']) \
    + 'ms - Input tokens:' + str(response['usage']['inputTokens']) \
    + ' - Output tokens:' + str(response['usage']['outputTokens']) + ' ---\n'
    return result
prompt = ("日本で一番高い山はなんですか")
print(f'Prompt: {prompt}\n')

for i in model_ids:
    response = bedrock_converse(bedrock_runtime_client, i, prompt)
    print(f'Model: {i}\n{response}')
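
The loops above and below iterate over model_ids with a bedrock_runtime_client, neither of which is defined in the snippets. A minimal setup sketch under those assumptions follows; the region and model IDs are illustrative and should be replaced with the models enabled in your own account.

import boto3

# Bedrock Runtime client (the region is an assumption; use the one where your models are enabled)
bedrock_runtime_client = boto3.client("bedrock-runtime", region_name="us-east-1")

# Illustrative model IDs to compare; replace with the models you actually use
model_ids = [
    "anthropic.claude-3-haiku-20240307-v1:0",
    "amazon.titan-text-express-v1",
]
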
The ConverseStream API can also be used to stream the output.

import sys

def bedrock_converse_stream(client, id, prompt, max_tokens=500, temperature=0, top_p=0.9):
    response = client.converse_stream(
        modelId=id,
        messages=[
            {
                "role": "user",
                "content": [
                    {
                        "text": prompt
                    }
                ]
            }
        ],
        inferenceConfig={
            "temperature": temperature,
            "maxTokens": max_tokens,
            "topP": top_p
        }
    )
    # Extract and print the response text in real-time.
    for event in response['stream']:
        if 'contentBlockDelta' in event:
            chunk = event['contentBlockDelta']
            sys.stdout.write(chunk['delta']['text'])
            sys.stdout.flush()
    return
prompt = ("いろは歌を教えて")
print(f'Prompt: {prompt}\n')

for i in model_ids:
    print(f'\n\nModel: {i}')
    bedrock_converse_stream(bedrock_runtime_client, i, prompt) 
