From c27208812df20bc64988fbc125f3c0728776194c Mon Sep 17 00:00:00 2001
From: Anchen
Date: Fri, 5 Apr 2024 09:15:50 +1100
Subject: [PATCH] chore: add repetition_penalty example (#45)
---
Libraries/LLM/Evaluate.swift | 86 ++++++++++++++++++-
Tools/llm-tool/LLMTool.swift | 14 ++-
.../xcshareddata/swiftpm/Package.resolved | 4 +-
.../xcshareddata/xcschemes/llm-tool.xcscheme | 8 +-
4 files changed, 102 insertions(+), 10 deletions(-)
diff --git a/Libraries/LLM/Evaluate.swift b/Libraries/LLM/Evaluate.swift
index bfc04b4..9267121 100644
--- a/Libraries/LLM/Evaluate.swift
+++ b/Libraries/LLM/Evaluate.swift
@@ -25,6 +25,25 @@ private func topPSampling(logits: MLXArray, topP: Float, temp: Float) -> MLXArra
return sortedIndices.squeezed(axis: 0)[sortedToken]
}
+private func applyRepetitionPenalty(
+ logits: MLXArray, repetitionContext: MLXArray, penalty: Float
+) -> MLXArray {
+ var logits = logits
+
+ if repetitionContext.shape[0] > 0 {
+ let indices = repetitionContext
+ var selectedLogits = take(logits, indices, axis: -1).squeezed(axis: 0)
+
+ selectedLogits = MLX.where(
+ selectedLogits .< 0, selectedLogits * penalty, selectedLogits / penalty)
+
+ logits[0..., indices] = selectedLogits
+ return logits
+ }
+
+ return logits
+}
+
private func sample(logits: MLXArray, temp: Float, topP: Float = 1.0) -> MLXArray {
if temp == 0 {
return argMax(logits, axis: -1)
@@ -43,23 +62,53 @@ public struct TokenIterator: Sequence, IteratorProtocol {
let model: LLMModel
let temp: Float
let topP: Float
+ let repetitionPenalty: Float
+ let repetitionContextSize: Int
+ var repetitionContext: MLXArray
var y: MLXArray
var cache: [(MLXArray, MLXArray)]
var first = true
- public init(prompt: MLXArray, model: LLMModel, temp: Float = 0.0, topP: Float = 1.0) {
+ public init(
+ prompt: MLXArray, model: LLMModel, temp: Float = 0.0, topP: Float = 1.0,
+ repetitionPenalty: Float = 1.0, repetitionContextSize: Int = 20
+ ) {
self.model = model
self.temp = temp
self.topP = topP
self.y = prompt
self.cache = []
+ self.repetitionPenalty = repetitionPenalty
+ self.repetitionContextSize = repetitionContextSize
+ if repetitionContextSize > 1 {
+ if prompt.shape[0] <= repetitionContextSize {
+ self.repetitionContext = prompt
+ } else {
+ self.repetitionContext = prompt[-repetitionContextSize ... -1]
+ }
+ } else {
+ self.repetitionContext = []
+ }
}
mutating public func next() -> MLXArray? {
var logits: MLXArray
(logits, cache) = model(expandedDimensions(y, axis: 0), cache: cache.isEmpty ? nil : cache)
- y = sample(logits: logits[-1, axis: 1], temp: temp, topP: topP)
+ logits = logits[0..., -1, 0...]
+ if repetitionPenalty > 1.0 {
+ // apply repetition penalty
+ logits = applyRepetitionPenalty(
+ logits: logits, repetitionContext: repetitionContext, penalty: repetitionPenalty)
+ }
+ y = sample(logits: logits, temp: temp, topP: topP)
+ // Append the current token to the repetition context; if the context now exceeds repetitionContextSize, drop its oldest token.
+ if repetitionContextSize > 1 {
+ repetitionContext = concatenated([repetitionContext, y], axis: 0)
+ if repetitionContext.shape[0] > repetitionContextSize {
+ repetitionContext = repetitionContext[1...]
+ }
+ }
return y
}
@@ -71,7 +120,10 @@ public struct TokenIterator: Sequence, IteratorProtocol {
///
/// Note that because MLXArray is not thread safe this eval's the result and sends the TokenId back
/// to the caller.
-public func generate(prompt: MLXArray, model: LLMModel, temp: Float = 0.0, topP: Float = 1.0) -> (
+public func generate(
+ prompt: MLXArray, model: LLMModel, temp: Float = 0.0, topP: Float = 1.0,
+ repetitionPenalty: Float = 1.0, repetitionContextSize: Int = 20
+) -> (
Task<Void, Never>, AsyncBufferSequence<AsyncChannel<Int>>
) {
let channel = AsyncChannel<Int>()
@@ -80,12 +132,38 @@ public func generate(prompt: MLXArray, model: LLMModel, temp: Float = 0.0, topP:
let task = Task {
var y = prompt
var cache = [(MLXArray, MLXArray)]()
+ var repetitionContext: MLXArray
+ if repetitionContextSize > 1 {
+ if prompt.shape[0] <= repetitionContextSize {
+ repetitionContext = prompt
+ } else {
+ repetitionContext = prompt[-repetitionContextSize ... -1]
+ }
+ } else {
+ repetitionContext = []
+ }
while !Task.isCancelled {
var logits: MLXArray
(logits, cache) = model(
expandedDimensions(y, axis: 0), cache: cache.isEmpty ? nil : cache)
- y = sample(logits: logits[-1, axis: 1], temp: temp, topP: topP)
+
+ logits = logits[0..., -1, 0...]
+ if repetitionPenalty > 1.0 {
+ // apply repetition penalty
+ logits = applyRepetitionPenalty(
+ logits: logits, repetitionContext: repetitionContext, penalty: repetitionPenalty
+ )
+ }
+ y = sample(logits: logits, temp: temp, topP: topP)
+ // Append the current token to the repetition context; if the context now exceeds repetitionContextSize, drop its oldest token.
+ if repetitionContextSize > 1 {
+ repetitionContext = concatenated([repetitionContext, y], axis: 0)
+ if repetitionContext.shape[0] > repetitionContextSize {
+ repetitionContext = repetitionContext[1...]
+ }
+ }
+
eval(y)
await channel.send(y.item(Int.self))
diff --git a/Tools/llm-tool/LLMTool.swift b/Tools/llm-tool/LLMTool.swift
index e2d2133..cdbe708 100644
--- a/Tools/llm-tool/LLMTool.swift
+++ b/Tools/llm-tool/LLMTool.swift
@@ -32,6 +32,12 @@ struct LLMArguments: ParsableArguments {
@Option(name: .shortAndLong, help: "The top p sampling")
var topP: Float = 0.9
+ @Option(name: .shortAndLong, help: "The penalty factor for repeating tokens")
+ var repetitionPenalty: Float = 1.0
+
+ @Option(name: .shortAndLong, help: "The number of tokens to consider for repetition penalty")
+ var repetitionContextSize: Int = 20
+
@Option(name: .long, help: "The PRNG seed")
var seed: UInt64 = 0
@@ -130,7 +136,9 @@ struct SyncGenerator: AsyncParsableCommand {
var printed = 0
for token in TokenIterator(
- prompt: MLXArray(promptTokens), model: model, temp: args.temperature, topP: args.topP)
+ prompt: MLXArray(promptTokens), model: model, temp: args.temperature, topP: args.topP,
+ repetitionPenalty: args.repetitionPenalty,
+ repetitionContextSize: args.repetitionContextSize)
{
if tokens.isEmpty {
eval(token)
@@ -208,7 +216,9 @@ struct AsyncGenerator: AsyncParsableCommand {
var printed = 0
let (task, channel) = generate(
- prompt: MLXArray(promptTokens), model: model, temp: args.temperature, topP: args.topP)
+ prompt: MLXArray(promptTokens), model: model, temp: args.temperature, topP: args.topP,
+ repetitionPenalty: args.repetitionPenalty,
+ repetitionContextSize: args.repetitionContextSize)
for await token in channel {
if tokens.isEmpty {
diff --git a/mlx-swift-examples.xcodeproj/project.xcworkspace/xcshareddata/swiftpm/Package.resolved b/mlx-swift-examples.xcodeproj/project.xcworkspace/xcshareddata/swiftpm/Package.resolved
index 3628bc8..5b241b4 100644
--- a/mlx-swift-examples.xcodeproj/project.xcworkspace/xcshareddata/swiftpm/Package.resolved
+++ b/mlx-swift-examples.xcodeproj/project.xcworkspace/xcshareddata/swiftpm/Package.resolved
@@ -15,7 +15,7 @@
"location" : "https://github.com/ml-explore/mlx-swift",
"state" : {
"branch" : "main",
- "revision" : "f4b00d8ce6917c64bd5057a4fb19433e848fdf87"
+ "revision" : "a1c544c817d44cfdfa1a650f521066b565c2ae4f"
}
},
{
@@ -78,7 +78,7 @@
"location" : "https://github.com/huggingface/swift-transformers",
"state" : {
"branch" : "main",
- "revision" : "9d82e00af680253499f1a9372abb2552a73527fb"
+ "revision" : "74b94211bdc741694ed7e700a1104c72e5ba68fe"
}
}
],
diff --git a/mlx-swift-examples.xcodeproj/xcshareddata/xcschemes/llm-tool.xcscheme b/mlx-swift-examples.xcodeproj/xcshareddata/xcschemes/llm-tool.xcscheme
index 402aa35..d778298 100644
--- a/mlx-swift-examples.xcodeproj/xcshareddata/xcschemes/llm-tool.xcscheme
+++ b/mlx-swift-examples.xcodeproj/xcshareddata/xcschemes/llm-tool.xcscheme
@@ -55,13 +55,17 @@
argument = "--model mlx-community/CodeLlama-13b-Instruct-hf-4bit-MLX"
isEnabled = "NO">
+
+
+ isEnabled = "NO">
+ isEnabled = "YES">