From c86d1c195ecccb0f25998dce9ba4842c8d9aa309 Mon Sep 17 00:00:00 2001
From: David Koski
Date: Mon, 26 Feb 2024 13:23:21 -0800
Subject: [PATCH] partial fix for #1

- handle loading models with different names for the safetensors files (gemma)
- handle merge tokens that can't be split
- organize code into Load/Evaluate
---
 Libraries/LLM/Evaluate.swift                 |  72 +++++++++++
 Libraries/LLM/{Util.swift => Load.swift}     | 128 +++++++++----------
 mlx-swift-examples.xcodeproj/project.pbxproj |  12 +-
 3 files changed, 138 insertions(+), 74 deletions(-)
 create mode 100644 Libraries/LLM/Evaluate.swift
 rename Libraries/LLM/{Util.swift => Load.swift} (61%)

diff --git a/Libraries/LLM/Evaluate.swift b/Libraries/LLM/Evaluate.swift
new file mode 100644
index 0000000..be2c51a
--- /dev/null
+++ b/Libraries/LLM/Evaluate.swift
@@ -0,0 +1,72 @@
+// Copyright © 2024 Apple Inc.
+
+import AsyncAlgorithms
+import Foundation
+import MLX
+import MLXRandom
+
+private func sample(logits: MLXArray, temp: Float) -> MLXArray {
+    if temp == 0 {
+        return argMax(logits, axis: -1)
+    } else {
+        return categorical(logits * (1 / temp))
+    }
+}
+
+/// Synchronous generator of tokens.
+///
+/// Port of `generate_step()` from https://github.com/ml-explore/mlx-examples/blob/main/llms/mlx_lm/utils.py
+public struct TokenIterator: Sequence, IteratorProtocol {
+    let model: LLMModel
+    let temp: Float
+
+    var y: MLXArray
+    var cache: [(MLXArray, MLXArray)]
+
+    var first = true
+
+    public init(prompt: MLXArray, model: LLMModel, temp: Float = 0.0) {
+        self.model = model
+        self.temp = temp
+        self.y = prompt
+        self.cache = []
+    }
+
+    mutating public func next() -> MLXArray? {
+        var logits: MLXArray
+        (logits, cache) = model(expandedDimensions(y, axis: 0), cache: cache.isEmpty ? nil : cache)
+        y = sample(logits: logits[-1, axis: 1], temp: temp)
+
+        return y
+    }
+}
+
+/// Async generator of tokens.
+///
+/// Port of `generate_step()` from https://github.com/ml-explore/mlx-examples/blob/main/llms/mlx_lm/utils.py.
+///
+/// Note that because MLXArray is not thread safe this eval's the result and sends the TokenId back
+/// to the caller.
+public func generate(prompt: MLXArray, model: LLMModel, temp: Float = 0.0) -> (
+    Task<Void, Never>, AsyncBufferSequence<AsyncChannel<Int>>
+) {
+    let channel = AsyncChannel<Int>()
+    let buffer = channel.buffer(policy: .bounded(10))
+
+    let task = Task {
+        var y = prompt
+        var cache = [(MLXArray, MLXArray)]()
+
+        while !Task.isCancelled {
+            var logits: MLXArray
+            (logits, cache) = model(
+                expandedDimensions(y, axis: 0), cache: cache.isEmpty ? nil : cache)
+            y = sample(logits: logits[-1, axis: 1], temp: temp)
+            eval(y)
+
+            await channel.send(y.item(Int.self))
+        }
+    }
+
+    return (task, buffer)
+}
diff --git a/Libraries/LLM/Util.swift b/Libraries/LLM/Load.swift
similarity index 61%
rename from Libraries/LLM/Util.swift
rename to Libraries/LLM/Load.swift
index 233fd57..44a9ece 100644
--- a/Libraries/LLM/Util.swift
+++ b/Libraries/LLM/Load.swift
@@ -21,7 +21,7 @@ public func load(
 
     // download the model weights and config
     let repo = Hub.Repo(id: name)
-    let modelFiles = ["config.json", "weights.00.safetensors"]
+    let modelFiles = ["config.json", "*.safetensors"]
     let modelDirectory = try await hub.snapshot(
         from: repo, matching: modelFiles, progressHandler: progressHandler)
 
@@ -33,7 +33,17 @@ public func load(
     let model = try baseConfig.modelType.createModel(configuration: configurationURL)
 
     // load the weights
-    let weights = try loadArrays(url: modelDirectory.appending(component: "weights.00.safetensors"))
+    var weights = [String: MLXArray]()
+    let enumerator = FileManager.default.enumerator(
+        at: modelDirectory, includingPropertiesForKeys: nil)!
+    for case let url as URL in enumerator {
+        if url.pathExtension == "safetensors" {
+            let w = try loadArrays(url: url)
+            for (key, value) in w {
+                weights[key] = value
+            }
+        }
+    }
 
     // quantize if needed
     if let quantization = baseConfig.quantization {
@@ -49,14 +59,17 @@ public func load(
     return (model, tokenizer)
 }
 
+// MARK: - Tokenizers
+
 public func loadTokenizer(name: String) async throws -> Tokenizer {
     // from AutoTokenizer.from() -- this lets us override parts of the configuration
     let config = LanguageModelConfigurationFromHub(modelName: name)
     guard var tokenizerConfig = try await config.tokenizerConfig else {
         throw LLMError(message: "missing config")
     }
-    let tokenizerData = try await config.tokenizerData
+    var tokenizerData = try await config.tokenizerData
 
+    // workaround: replacement tokenizers for unhandled values in swift-transform
     if let tokenizerClass = tokenizerConfig.tokenizerClass?.stringValue,
         let replacement = replacementTokenizers[tokenizerClass]
     {
@@ -65,14 +78,55 @@ public func loadTokenizer(name: String) async throws -> Tokenizer {
         tokenizerConfig = Config(dictionary)
     }
 
+    // workaround: some merges can't be split on space in BPETokenizer
+    if let tokenizerClass = tokenizerConfig.tokenizerClass?.stringValue {
+        switch tokenizerClass {
+        case "T5Tokenizer":
+            break
+        default:
+            tokenizerData = discardUnhandledMerges(tokenizerData: tokenizerData)
+        }
+    }
+
     return try PreTrainedTokenizer(tokenizerConfig: tokenizerConfig, tokenizerData: tokenizerData)
 }
 
+public func discardUnhandledMerges(tokenizerData: Config) -> Config {
+    // see https://github.com/ml-explore/mlx-swift-examples/issues/1
+
+    if let model = tokenizerData.model {
+        if let merges = model.dictionary["merges"] as? [String] {
+            // discard any merges that can't be split on a space
+            // (required by BPETokenizer)
+            let newMerges =
+                merges
+                .filter {
+                    $0.split(separator: " ").count == 2
+                }
+
+            if newMerges.count != merges.count {
+                var newModel = model.dictionary
+                newModel["merges"] = newMerges
+
+                var newTokenizerData = tokenizerData.dictionary
+                newTokenizerData["model"] = newModel
+
+                return Config(newTokenizerData)
+            }
+        }
+    }
+
+    return tokenizerData
+}
+
 /// overrides for TokenizerModel/knownTokenizers
 let replacementTokenizers = [
-    "CodeLlamaTokenizer": "LlamaTokenizer"
+    "CodeLlamaTokenizer": "LlamaTokenizer",
+    "GemmaTokenizer": "PreTrainedTokenizer",
 ]
 
+// MARK: - Quantization
+
 private func quantizeIfNeeded(
     model: LLMModel, weights: [String: MLXArray], quantization: BaseConfiguration.Quantization
 ) {
@@ -105,69 +159,3 @@ private func quantizeIfNeeded(
         model: model, groupSize: quantization.groupSize, bits: quantization.bits,
         predicate: predicate)
 }
-
-private func sample(logits: MLXArray, temp: Float) -> MLXArray {
-    if temp == 0 {
-        return argMax(logits, axis: -1)
-    } else {
-        return categorical(logits * (1 / temp))
-    }
-}
-
-/// Synchronous generator of tokens.
-///
-/// Port of `generate_step()` from https://github.com/ml-explore/mlx-examples/blob/main/llms/mlx_lm/utils.py
-public struct TokenIterator: Sequence, IteratorProtocol {
-    let model: LLMModel
-    let temp: Float
-
-    var y: MLXArray
-    var cache: [(MLXArray, MLXArray)]
-
-    var first = true
-
-    public init(prompt: MLXArray, model: LLMModel, temp: Float = 0.0) {
-        self.model = model
-        self.temp = temp
-        self.y = prompt
-        self.cache = []
-    }
-
-    mutating public func next() -> MLXArray? {
-        var logits: MLXArray
-        (logits, cache) = model(expandedDimensions(y, axis: 0), cache: cache.isEmpty ? nil : cache)
-        y = sample(logits: logits[-1, axis: 1], temp: temp)
-
-        return y
-    }
-}
-
-/// Async generator of tokens.
-///
-/// Port of `generate_step()` from https://github.com/ml-explore/mlx-examples/blob/main/llms/mlx_lm/utils.py.
-///
-/// Note that because MLXArray is not thread safe this eval's the result and sends the TokenId back
-/// to the caller.
-public func generate(prompt: MLXArray, model: LLMModel, temp: Float = 0.0) -> (
-    Task<Void, Never>, AsyncBufferSequence<AsyncChannel<Int>>
-) {
-    let channel = AsyncChannel<Int>()
-    let buffer = channel.buffer(policy: .bounded(10))
-
-    let task = Task {
-        var y = prompt
-        var cache = [(MLXArray, MLXArray)]()
-
-        while !Task.isCancelled {
-            var logits: MLXArray
-            (logits, cache) = model(
-                expandedDimensions(y, axis: 0), cache: cache.isEmpty ? nil : cache)
-            y = sample(logits: logits[-1, axis: 1], temp: temp)
-            eval(y)
-
-            await channel.send(y.item(Int.self))
-        }
-    }
-
-    return (task, buffer)
-}
diff --git a/mlx-swift-examples.xcodeproj/project.pbxproj b/mlx-swift-examples.xcodeproj/project.pbxproj
index 555ad92..f033265 100644
--- a/mlx-swift-examples.xcodeproj/project.pbxproj
+++ b/mlx-swift-examples.xcodeproj/project.pbxproj
@@ -21,7 +21,7 @@
 		C38935C82B869C7A0037B833 /* LLM.h in Headers */ = {isa = PBXBuildFile; fileRef = C38935C72B869C7A0037B833 /* LLM.h */; settings = {ATTRIBUTES = (Public, ); }; };
 		C38935CC2B869C870037B833 /* Llama.swift in Sources */ = {isa = PBXBuildFile; fileRef = C34E48EE2B696E6500FCB841 /* Llama.swift */; };
 		C38935CD2B869C870037B833 /* Configuration.swift in Sources */ = {isa = PBXBuildFile; fileRef = C34E48EF2B696E6500FCB841 /* Configuration.swift */; };
-		C38935CE2B869C870037B833 /* Util.swift in Sources */ = {isa = PBXBuildFile; fileRef = C34E48ED2B696E6500FCB841 /* Util.swift */; };
+		C38935CE2B869C870037B833 /* Load.swift in Sources */ = {isa = PBXBuildFile; fileRef = C34E48ED2B696E6500FCB841 /* Load.swift */; };
 		C38935D02B869CC40037B833 /* MLX in Frameworks */ = {isa = PBXBuildFile; productRef = C38935CF2B869CC40037B833 /* MLX */; };
 		C38935D22B869CC40037B833 /* MLXNN in Frameworks */ = {isa = PBXBuildFile; productRef = C38935D12B869CC40037B833 /* MLXNN */; };
 		C38935D42B869CC40037B833 /* MLXRandom in Frameworks */ = {isa = PBXBuildFile; productRef = C38935D32B869CC40037B833 /* MLXRandom */; };
@@ -36,6 +36,7 @@
 		C3932D572B6A060B00A81055 /* MNIST.swift in Sources */ = {isa = PBXBuildFile; fileRef = C3932D562B6A060B00A81055 /* MNIST.swift */; };
 		C3932D592B6A0BE400A81055 /* Random.swift in Sources */ = {isa = PBXBuildFile; fileRef = C3932D582B6A0BE400A81055 /* Random.swift */; };
 		C397C59C2B62C6D0004B084D /* ArgumentParser in Frameworks */ = {isa = PBXBuildFile; productRef = C397C59B2B62C6D0004B084D /* ArgumentParser */; };
+		C3E786AB2B8D1AEC0004D037 /* Evaluate.swift in Sources */ = {isa = PBXBuildFile; fileRef = C3E786AA2B8D1AEC0004D037 /* Evaluate.swift */; };
 		C3FBCB212B8520B80007E490 /* MLX in Frameworks */ = {isa = PBXBuildFile; productRef = C3FBCB202B8520B80007E490 /* MLX */; };
 		C3FBCB292B8520DA0007E490 /* MLX in Frameworks */ = {isa = PBXBuildFile; productRef = C3FBCB282B8520DA0007E490 /* MLX */; };
 		C3FBCB2B2B8520DA0007E490 /* MLXNN in Frameworks */ = {isa = PBXBuildFile; productRef = C3FBCB2A2B8520DA0007E490 /* MLXNN */; };
@@ -129,7 +130,7 @@
 		C3288D732B6D9313009FF608 /* LinearModelTraining */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = LinearModelTraining; sourceTree = BUILT_PRODUCTS_DIR; };
 		C3288D752B6D9313009FF608 /* LinearModelTraining.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = LinearModelTraining.swift; sourceTree = "<group>"; };
 		C3288D842B6D94BD009FF608 /* README.md */ = {isa = PBXFileReference; lastKnownFileType = net.daringfireball.markdown; path = README.md; sourceTree = "<group>"; };
-		C34E48ED2B696E6500FCB841 /* Util.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = Util.swift; sourceTree = "<group>"; };
+		C34E48ED2B696E6500FCB841 /* Load.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = Load.swift; sourceTree = "<group>"; };
 		C34E48EE2B696E6500FCB841 /* Llama.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = Llama.swift; sourceTree = "<group>"; };
 		C34E48EF2B696E6500FCB841 /* Configuration.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = Configuration.swift; sourceTree = "<group>"; };
 		C34E48F42B696F0B00FCB841 /* LLMTool.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = LLMTool.swift; sourceTree = "<group>"; };
@@ -152,6 +153,7 @@
 		C397C58B2B62C6A9004B084D /* llm-tool */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = "llm-tool"; sourceTree = BUILT_PRODUCTS_DIR; };
 		C3C3240B2B6CA689007D2D9A /* README.md */ = {isa = PBXFileReference; lastKnownFileType = net.daringfireball.markdown; path = README.md; sourceTree = "<group>"; };
 		C3C3240C2B6CA792007D2D9A /* README.md */ = {isa = PBXFileReference; lastKnownFileType = net.daringfireball.markdown; path = README.md; sourceTree = "<group>"; };
+		C3E786AA2B8D1AEC0004D037 /* Evaluate.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = Evaluate.swift; sourceTree = "<group>"; };
 /* End PBXFileReference section */
 
 /* Begin PBXFrameworksBuildPhase section */
@@ -269,7 +271,8 @@
 				C38935E02B869F420037B833 /* LLMModel.swift */,
 				C38935DE2B869DD00037B833 /* Phi.swift */,
 				C34E48F62B69832600FCB841 /* README.md */,
-				C34E48ED2B696E6500FCB841 /* Util.swift */,
+				C34E48ED2B696E6500FCB841 /* Load.swift */,
+				C3E786AA2B8D1AEC0004D037 /* Evaluate.swift */,
 			);
 			path = LLM;
 			sourceTree = "<group>";
@@ -606,7 +609,8 @@
 				C38935E32B86C0FE0037B833 /* Gemma.swift in Sources */,
 				C38935CD2B869C870037B833 /* Configuration.swift in Sources */,
 				C38935DF2B869DD00037B833 /* Phi.swift in Sources */,
-				C38935CE2B869C870037B833 /* Util.swift in Sources */,
+				C38935CE2B869C870037B833 /* Load.swift in Sources */,
+				C3E786AB2B8D1AEC0004D037 /* Evaluate.swift in Sources */,
 				C38935CC2B869C870037B833 /* Llama.swift in Sources */,
 			);
 			runOnlyForDeploymentPostprocessing = 0;
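
For reference (not part of the patch): a minimal sketch of how the async generate() added in Evaluate.swift might be consumed. It assumes a model: LLMModel and tokenizer: Tokenizer already obtained, e.g. from load(...) in Load.swift; the function name, temperature, token budget, and stop condition below are illustrative only.

import LLM  // the LLM library target in this repo (assumed module name)
import MLX
import Tokenizers  // swift-transformers

// Sketch: drain the bounded AsyncChannel of token ids produced by generate().
func runGeneration(model: LLMModel, tokenizer: Tokenizer, prompt: String) async {
    let promptTokens = MLXArray(tokenizer.encode(text: prompt))

    // generate() returns the producer Task and a buffered async sequence of token ids.
    let (task, tokens) = generate(prompt: promptTokens, model: model, temp: 0.6)

    var output = [Int]()
    for await token in tokens {
        // stop on an unknown token or after an illustrative budget of 256 tokens
        if token == tokenizer.unknownTokenId || output.count >= 256 { break }
        output.append(token)
    }

    // the producer loops until cancelled, so cancel it once we stop reading
    task.cancel()
    print(tokenizer.decode(tokens: output))
}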