add LLM evaluator example

- runs on iOS and macOS
- downloads a model / tokenizer from Hugging Face
- evaluates the given prompt
This commit is contained in:
David Koski
2024-03-01 16:10:00 -08:00
parent 79e0620891
commit b41f14fba7
11 changed files with 645 additions and 4 deletions

View File

@@ -54,9 +54,9 @@ public struct ModelConfiguration {
extension ModelConfiguration {
static let mistral7B4bit = ModelConfiguration(id: "mlx-community/Mistral-7B-v0.1-hf-4bit-mlx")
public static let mistral7B4bit = ModelConfiguration(id: "mlx-community/Mistral-7B-v0.1-hf-4bit-mlx")
static let codeLlama13b4bit = ModelConfiguration(
public static let codeLlama13b4bit = ModelConfiguration(
id: "mlx-community/CodeLlama-13b-Instruct-hf-4bit-MLX",
overrideTokenizer: "PreTrainedTokenizer"
) { prompt in
@@ -67,11 +67,11 @@ extension ModelConfiguration {
"<PRE> " + prompt.replacingOccurrences(of: "<FILL_ME>", with: "<SUF>") + " <MID>"
}
static let phi4bit = ModelConfiguration(id: "mlx-community/phi-2-hf-4bit-mlx") { prompt in
public static let phi4bit = ModelConfiguration(id: "mlx-community/phi-2-hf-4bit-mlx") { prompt in
"Instruct: \(prompt). Output: "
}
static let gemma2bQuantized = ModelConfiguration(
public static let gemma2bQuantized = ModelConfiguration(
id: "mlx-community/quantized-gemma-2b-it",
overrideTokenizer: "PreTrainedTokenizer"
) { prompt in