chore(llm-tool): add the top_p option in the llm-tool (#41)

* chore: add top p option in llm-tool
* chore: wire up the top p with async generate
Author: Anchen
Date: 2024-04-04 01:54:54 +11:00
Committed by: GitHub
Parent: b3eb428c60
Commit: 2d0fdfe3a9
4 changed files with 13 additions and 7 deletions


@@ -71,7 +71,7 @@ public struct TokenIterator: Sequence, IteratorProtocol {
 ///
 /// Note that because MLXArray is not thread safe this eval's the result and sends the TokenId back
 /// to the caller.
-public func generate(prompt: MLXArray, model: LLMModel, temp: Float = 0.0) -> (
+public func generate(prompt: MLXArray, model: LLMModel, temp: Float = 0.0, topP: Float = 1.0) -> (
     Task<Void, Never>, AsyncBufferSequence<AsyncChannel<Int>>
 ) {
     let channel = AsyncChannel<Int>()
@@ -85,7 +85,7 @@ public func generate(prompt: MLXArray, model: LLMModel, temp: Float = 0.0) -> (
         var logits: MLXArray
         (logits, cache) = model(
             expandedDimensions(y, axis: 0), cache: cache.isEmpty ? nil : cache)
-        y = sample(logits: logits[-1, axis: 1], temp: temp)
+        y = sample(logits: logits[-1, axis: 1], temp: temp, topP: topP)
         eval(y)
         await channel.send(y.item(Int.self))
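
For context: top-p (nucleus) sampling keeps the smallest set of highest-probability tokens whose cumulative probability reaches topP, renormalizes over that set, and samples from it, so the new default of topP: 1.0 leaves generation unchanged. A minimal plain-Swift sketch of the idea; the function name and signature here are illustrative only, not the library's MLXArray-based sample:

// Sketch of top-p (nucleus) sampling over plain-Swift probabilities.
// Hypothetical helper; not part of this diff.
func topPSample(probs: [Float], topP: Float) -> Int {
    // Rank token ids by probability, highest first.
    let ranked = probs.indices.sorted { probs[$0] > probs[$1] }

    // Keep the smallest prefix whose cumulative probability reaches topP.
    var nucleus: [Int] = []
    var total: Float = 0
    for id in ranked {
        nucleus.append(id)
        total += probs[id]
        if total >= topP { break }
    }

    // Renormalize implicitly: draw from [0, total) and walk the nucleus.
    var draw = Float.random(in: 0..<total)
    for id in nucleus {
        draw -= probs[id]
        if draw < 0 { return id }
    }
    return nucleus.last!
}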


@@ -29,6 +29,9 @@ struct LLMArguments: ParsableArguments {
     @Option(name: .shortAndLong, help: "The sampling temperature")
     var temperature: Float = 0.6

+    @Option(name: .shortAndLong, help: "The top p sampling")
+    var topP: Float = 0.9
+
     @Option(name: .long, help: "The PRNG seed")
     var seed: UInt64 = 0
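
A note on the flag spelling: swift-argument-parser derives the long name from the property name, so topP surfaces as --top-p, which matches the scheme argument added at the end of this commit. A minimal sketch under that assumption, using a hypothetical SamplingOptions stand-in rather than the tool's real LLMArguments:

import ArgumentParser

// Hypothetical stand-in for the relevant slice of LLMArguments.
struct SamplingOptions: ParsableArguments {
    // Parsed as "--top-p <value>"; defaults to 0.9 when omitted.
    @Option(name: .shortAndLong, help: "The top p sampling")
    var topP: Float = 0.9
}

// Usage: let options = try SamplingOptions.parse(["--top-p", "0.95"])
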
@@ -127,7 +130,7 @@ struct SyncGenerator: AsyncParsableCommand {
         var printed = 0
         for token in TokenIterator(
-            prompt: MLXArray(promptTokens), model: model, temp: args.temperature)
+            prompt: MLXArray(promptTokens), model: model, temp: args.temperature, topP: args.topP)
         {
             if tokens.isEmpty {
                 eval(token)
@@ -205,7 +208,7 @@ struct AsyncGenerator: AsyncParsableCommand {
         var printed = 0
         let (task, channel) = generate(
-            prompt: MLXArray(promptTokens), model: model, temp: args.temperature)
+            prompt: MLXArray(promptTokens), model: model, temp: args.temperature, topP: args.topP)
         for await token in channel {
             if tokens.isEmpty {
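
Both call sites now forward args.topP, so the synchronous TokenIterator path and the channel-based generate path sample the same way. Note that the defaults differ by layer: the library's generate defaults to temp: 0.0 and topP: 1.0 (assuming the usual convention that zero temperature means greedy argmax, that amounts to deterministic decoding with no nucleus truncation), while the CLI defaults of 0.6 and 0.9 enable nucleus sampling unless overridden.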


@@ -1,5 +1,4 @@
 {
-  "originHash" : "da53546673b6d05016b6e5640c18814c7dba5b5af8db34715afe6d633037c758",
   "pins" : [
     {
       "identity" : "gzipswift",
@@ -79,9 +78,9 @@
       "location" : "https://github.com/huggingface/swift-transformers",
       "state" : {
         "branch" : "main",
-        "revision" : "3bd02269b7797ade67c15679a575cd5c6f203ce6"
+        "revision" : "9d82e00af680253499f1a9372abb2552a73527fb"
       }
     }
   ],
-  "version" : 3
+  "version" : 2
 }


@@ -55,6 +55,10 @@
          argument = "--model mlx-community/CodeLlama-13b-Instruct-hf-4bit-MLX"
          isEnabled = "NO">
       </CommandLineArgument>
+      <CommandLineArgument
+         argument = "--top-p 0.95"
+         isEnabled = "NO">
+      </CommandLineArgument>
       <CommandLineArgument
          argument = "--model mlx-community/c4ai-command-r-v01-4bit"
          isEnabled = "YES">