chore: add repetition_penalty example (#45)
This commit is contained in:
@@ -25,6 +25,25 @@ private func topPSampling(logits: MLXArray, topP: Float, temp: Float) -> MLXArra
|
||||
return sortedIndices.squeezed(axis: 0)[sortedToken]
|
||||
}
|
||||
|
||||
/// Scales the logits of recently generated tokens so they become less likely.
///
/// For every token id in `repetitionContext`, the corresponding logit is pushed
/// toward "less probable": negative logits are multiplied by `penalty`,
/// positive logits are divided by it.
///
/// - Parameters:
///   - logits: Logits for the next token, shape `[1, vocabSize]` (leading batch
///     axis is assumed — confirm against callers).
///   - repetitionContext: Token ids to penalize; may be empty.
///   - penalty: Penalty factor; values > 1 discourage repetition.
/// - Returns: The logits with the penalty applied (unchanged when the context is empty).
private func applyRepetitionPenalty(
    logits: MLXArray, repetitionContext: MLXArray, penalty: Float
) -> MLXArray {
    // Nothing to penalize when no context tokens have been collected yet.
    guard repetitionContext.shape[0] > 0 else {
        return logits
    }

    var result = logits
    let tokenIndices = repetitionContext

    // Gather the logits of the context tokens, collapse the batch axis,
    // and move each toward lower likelihood regardless of sign.
    var penalized = take(result, tokenIndices, axis: -1).squeezed(axis: 0)
    penalized = MLX.where(
        penalized .< 0, penalized * penalty, penalized / penalty)

    // Scatter the penalized values back into place.
    result[0..., tokenIndices] = penalized
    return result
}
|
||||
|
||||
private func sample(logits: MLXArray, temp: Float, topP: Float = 1.0) -> MLXArray {
|
||||
if temp == 0 {
|
||||
return argMax(logits, axis: -1)
|
||||
@@ -43,23 +62,53 @@ public struct TokenIterator: Sequence, IteratorProtocol {
|
||||
let model: LLMModel
|
||||
let temp: Float
|
||||
let topP: Float
|
||||
let repetitionPenalty: Float
|
||||
let repetitionContextSize: Int
|
||||
var repetitionContext: MLXArray
|
||||
var y: MLXArray
|
||||
var cache: [(MLXArray, MLXArray)]
|
||||
|
||||
var first = true
|
||||
|
||||
public init(prompt: MLXArray, model: LLMModel, temp: Float = 0.0, topP: Float = 1.0) {
|
||||
/// Creates a token iterator that generates from `model` starting at `prompt`.
///
/// - Parameters:
///   - prompt: Initial token ids used to prime the model.
///   - model: The language model that produces logits.
///   - temp: Sampling temperature; `0` selects the argmax token.
///   - topP: Nucleus (top-p) sampling threshold.
///   - repetitionPenalty: Factor applied to recently seen tokens; > 1 penalizes.
///   - repetitionContextSize: Number of trailing tokens considered for the penalty.
public init(
    prompt: MLXArray, model: LLMModel, temp: Float = 0.0, topP: Float = 1.0,
    repetitionPenalty: Float = 1.0, repetitionContextSize: Int = 20
) {
    self.model = model
    self.temp = temp
    self.topP = topP
    self.y = prompt
    self.cache = []
    self.repetitionPenalty = repetitionPenalty
    self.repetitionContextSize = repetitionContextSize

    // Seed the repetition context with up to the trailing
    // `repetitionContextSize` tokens of the prompt.
    // NOTE(review): a size of exactly 1 disables the context here (`> 1`);
    // confirm that is intentional rather than `> 0`.
    if repetitionContextSize > 1, prompt.shape[0] > repetitionContextSize {
        self.repetitionContext = prompt[-repetitionContextSize ... -1]
    } else if repetitionContextSize > 1 {
        self.repetitionContext = prompt
    } else {
        self.repetitionContext = []
    }
}
|
||||
|
||||
/// Produces the next token.
///
/// Runs one forward pass of the model on the current token(s), applies the
/// optional repetition penalty to the final-position logits, samples the next
/// token, and updates the rolling repetition context.
///
/// Fix: the pre-penalty `y = sample(logits: logits[-1, axis: 1], ...)` line is
/// removed — it was a leftover from before the repetition-penalty refactor.
/// Its result was immediately overwritten by the post-penalty sample, so it
/// only wasted a sampling pass and consumed PRNG state (changing outputs for
/// a given seed).
mutating public func next() -> MLXArray? {
    var logits: MLXArray
    // On the first call the cache is empty and the full prompt is evaluated;
    // afterwards only the most recent token is fed, reusing the KV cache.
    (logits, cache) = model(expandedDimensions(y, axis: 0), cache: cache.isEmpty ? nil : cache)

    // Keep only the logits for the last sequence position.
    logits = logits[0..., -1, 0...]
    if repetitionPenalty > 1.0 {
        // Penalize tokens that appeared in the recent context.
        logits = applyRepetitionPenalty(
            logits: logits, repetitionContext: repetitionContext, penalty: repetitionPenalty)
    }
    y = sample(logits: logits, temp: temp, topP: topP)

    // Append the sampled token to the repetition context and drop the oldest
    // token once the context exceeds `repetitionContextSize`.
    if repetitionContextSize > 1 {
        repetitionContext = concatenated([repetitionContext, y], axis: 0)
        if repetitionContext.shape[0] > repetitionContextSize {
            repetitionContext = repetitionContext[1...]
        }
    }

    return y
}
|
||||
@@ -71,7 +120,10 @@ public struct TokenIterator: Sequence, IteratorProtocol {
|
||||
///
|
||||
/// Note that because MLXArray is not thread safe this eval's the result and sends the TokenId back
|
||||
/// to the caller.
|
||||
public func generate(prompt: MLXArray, model: LLMModel, temp: Float = 0.0, topP: Float = 1.0) -> (
|
||||
public func generate(
|
||||
prompt: MLXArray, model: LLMModel, temp: Float = 0.0, topP: Float = 1.0,
|
||||
repetitionPenalty: Float = 1.0, repetitionContextSize: Int = 20
|
||||
) -> (
|
||||
Task<Void, Never>, AsyncBufferSequence<AsyncChannel<Int>>
|
||||
) {
|
||||
let channel = AsyncChannel<Int>()
|
||||
@@ -80,12 +132,38 @@ public func generate(prompt: MLXArray, model: LLMModel, temp: Float = 0.0, topP:
|
||||
let task = Task {
|
||||
var y = prompt
|
||||
var cache = [(MLXArray, MLXArray)]()
|
||||
var repetitionContext: MLXArray
|
||||
|
||||
if repetitionContextSize > 1 {
|
||||
if prompt.shape[0] <= repetitionContextSize {
|
||||
repetitionContext = prompt
|
||||
} else {
|
||||
repetitionContext = prompt[-repetitionContextSize ... -1]
|
||||
}
|
||||
} else {
|
||||
repetitionContext = []
|
||||
}
|
||||
while !Task.isCancelled {
|
||||
var logits: MLXArray
|
||||
(logits, cache) = model(
|
||||
expandedDimensions(y, axis: 0), cache: cache.isEmpty ? nil : cache)
|
||||
y = sample(logits: logits[-1, axis: 1], temp: temp, topP: topP)
|
||||
|
||||
logits = logits[0..., -1, 0...]
|
||||
if repetitionPenalty > 1.0 {
|
||||
// apply repetition penalty
|
||||
logits = applyRepetitionPenalty(
|
||||
logits: logits, repetitionContext: repetitionContext, penalty: repetitionPenalty
|
||||
)
|
||||
}
|
||||
y = sample(logits: logits, temp: temp, topP: topP)
|
||||
// append the current token to the context and check repetitionPenalty context see if need to remove the first token
|
||||
if repetitionContextSize > 1 {
|
||||
repetitionContext = concatenated([repetitionContext, y], axis: 0)
|
||||
if repetitionContext.shape[0] > repetitionContextSize {
|
||||
repetitionContext = repetitionContext[1...]
|
||||
}
|
||||
}
|
||||
|
||||
eval(y)
|
||||
|
||||
await channel.send(y.item(Int.self))
|
||||
|
||||
@@ -32,6 +32,12 @@ struct LLMArguments: ParsableArguments {
|
||||
@Option(name: .shortAndLong, help: "The top p sampling")
|
||||
var topP: Float = 0.9
|
||||
|
||||
@Option(name: .shortAndLong, help: "The penalty factor for repeating tokens")
|
||||
var repetitionPenalty: Float = 1.0
|
||||
|
||||
@Option(name: .shortAndLong, help: "The number of tokens to consider for repetition penalty")
|
||||
var repetitionContextSize: Int = 20
|
||||
|
||||
@Option(name: .long, help: "The PRNG seed")
|
||||
var seed: UInt64 = 0
|
||||
|
||||
@@ -130,7 +136,9 @@ struct SyncGenerator: AsyncParsableCommand {
|
||||
var printed = 0
|
||||
|
||||
for token in TokenIterator(
|
||||
prompt: MLXArray(promptTokens), model: model, temp: args.temperature, topP: args.topP)
|
||||
prompt: MLXArray(promptTokens), model: model, temp: args.temperature, topP: args.topP,
|
||||
repetitionPenalty: args.repetitionPenalty,
|
||||
repetitionContextSize: args.repetitionContextSize)
|
||||
{
|
||||
if tokens.isEmpty {
|
||||
eval(token)
|
||||
@@ -208,7 +216,9 @@ struct AsyncGenerator: AsyncParsableCommand {
|
||||
var printed = 0
|
||||
|
||||
let (task, channel) = generate(
|
||||
prompt: MLXArray(promptTokens), model: model, temp: args.temperature, topP: args.topP)
|
||||
prompt: MLXArray(promptTokens), model: model, temp: args.temperature, topP: args.topP,
|
||||
repetitionPenalty: args.repetitionPenalty,
|
||||
repetitionContextSize: args.repetitionContextSize)
|
||||
|
||||
for await token in channel {
|
||||
if tokens.isEmpty {
|
||||
|
||||
@@ -15,7 +15,7 @@
|
||||
"location" : "https://github.com/ml-explore/mlx-swift",
|
||||
"state" : {
|
||||
"branch" : "main",
|
||||
"revision" : "f4b00d8ce6917c64bd5057a4fb19433e848fdf87"
|
||||
"revision" : "a1c544c817d44cfdfa1a650f521066b565c2ae4f"
|
||||
}
|
||||
},
|
||||
{
|
||||
@@ -78,7 +78,7 @@
|
||||
"location" : "https://github.com/huggingface/swift-transformers",
|
||||
"state" : {
|
||||
"branch" : "main",
|
||||
"revision" : "9d82e00af680253499f1a9372abb2552a73527fb"
|
||||
"revision" : "74b94211bdc741694ed7e700a1104c72e5ba68fe"
|
||||
}
|
||||
}
|
||||
],
|
||||
|
||||
@@ -55,13 +55,17 @@
|
||||
argument = "--model mlx-community/CodeLlama-13b-Instruct-hf-4bit-MLX"
|
||||
isEnabled = "NO">
|
||||
</CommandLineArgument>
|
||||
<CommandLineArgument
|
||||
argument = "--repetition-penalty 1.2"
|
||||
isEnabled = "NO">
|
||||
</CommandLineArgument>
|
||||
<CommandLineArgument
|
||||
argument = "--top-p 0.95"
|
||||
isEnabled = "NO">
|
||||
</CommandLineArgument>
|
||||
<CommandLineArgument
|
||||
argument = "--model mlx-community/c4ai-command-r-v01-4bit"
|
||||
isEnabled = "YES">
|
||||
isEnabled = "NO">
|
||||
</CommandLineArgument>
|
||||
<CommandLineArgument
|
||||
argument = "--model mlx-community/starcoder2-3b-4bit"
|
||||
@@ -77,7 +81,7 @@
|
||||
</CommandLineArgument>
|
||||
<CommandLineArgument
|
||||
argument = "--model mlx-community/Mistral-7B-v0.1-hf-4bit-mlx"
|
||||
isEnabled = "NO">
|
||||
isEnabled = "YES">
|
||||
</CommandLineArgument>
|
||||
<CommandLineArgument
|
||||
argument = "--model mlx-community/quantized-gemma-2b-it"
|
||||
|
||||
Reference in New Issue
Block a user