implement LoRA / QLoRA (#46)
* implement LoRA / QLoRA — example of using MLX to fine-tune an LLM with low-rank adaptation (LoRA) for a target task
  - see also https://arxiv.org/abs/2106.09685
  - based on https://github.com/ml-explore/mlx-examples/tree/main/lora
* add some command-line flags I found useful during use
  - --quiet — don't print decorator text, just the generated text
  - --prompt @/tmp/file.txt — load the prompt from a file
* user can specify a path to a model OR a model identifier on Hugging Face
* update mlx-swift reference

Co-authored-by: Ashraful Islam <ashraful.meche@gmail.com>
Co-authored-by: JustinMeans <46542161+JustinMeans@users.noreply.github.com>
This commit is contained in:
@@ -4,10 +4,20 @@ import Foundation
|
||||
import Hub
|
||||
import Tokenizers
|
||||
|
||||
public func loadTokenizer(configuration: ModelConfiguration) async throws -> Tokenizer {
|
||||
public func loadTokenizer(configuration: ModelConfiguration, hub: HubApi) async throws -> Tokenizer
|
||||
{
|
||||
// from AutoTokenizer.from() -- this lets us override parts of the configuration
|
||||
let config = LanguageModelConfigurationFromHub(
|
||||
modelName: configuration.tokenizerId ?? configuration.id)
|
||||
|
||||
let config: LanguageModelConfigurationFromHub
|
||||
|
||||
switch configuration.id {
|
||||
case .id(let id):
|
||||
config = LanguageModelConfigurationFromHub(
|
||||
modelName: configuration.tokenizerId ?? id, hubApi: hub)
|
||||
case .directory(let directory):
|
||||
config = LanguageModelConfigurationFromHub(modelFolder: directory, hubApi: hub)
|
||||
}
|
||||
|
||||
guard var tokenizerConfig = try await config.tokenizerConfig else {
|
||||
throw LLMError(message: "missing config")
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user