Models
Before You Start #
API Reference Quicklinks #
| Create/Delete Functions | Get Functions | Misc Functions |
|---|---|---|
| lore.create_hf_model | lore.get_models | lore.upload_model_to_hf |
| lore.register_model | lore.get_model | lore.download_model |
| lore.delete_model | lore.get_base_models | |
| | lore.get_base_model | |
Create & Register Models #
Via Hugging Face #
hf_model_name = "gpt2" # Example Hugging Face model name
branch = None # Specify if you want to use a specific branch of the model repository, otherwise defaults to 'main'
hf_model_commit = None # Optionally pin the model to a specific commit hash (git revision SHA) for reproducibility
formatting_tokens = None # Define formatting tokens if needed, otherwise leave as None
problem_type = "" # Specify the problem type, e.g., "text-generation"
model_config = None # Additional model configuration settings if needed
# Create the Hugging Face model
hf_model = lore.create_hf_model(
    hf_model_name=hf_model_name,
    branch=branch,
    hf_model_commit=hf_model_commit,
    formatting_tokens=formatting_tokens,
    problem_type=problem_type,
    model_config=model_config
)
# Register the Hugging Face model
registered_model = lore.register_model(model=hf_model)
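After registration, the returned object can be inspected directly. A minimal sanity check, assuming it exposes the same id and genai_model_name attributes used in the listing examples further down this page:
# Print the registered model's ID and name (attribute names as used in the
# "Get Models" examples below)
print(f"Registered model: id={registered_model.id}, name={registered_model.genai_model_name}")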
Get Models #
By List #
models = lore.get_models()
all_base_models = lore.get_base_models()
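Both calls return lists that can be iterated over directly. A short sketch, reusing the attribute names shown in the experiment-models example below:
for m in models:
    print(f"{m.genai_model_name} (id={m.id})")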
By Base Name #
llama2_base = lore.get_base_model("meta-llama/Llama-2-7b-hf")
By ID #
This method is most useful when you already have a list of models and you want to get a specific model by its ID.
model = lore.get_model(some_model.id)
models = lore.get_experiment_models(exp.id)
for m in models:
    print(
        f"Model name = {m.genai_model_name}, model_id = {m.id}, experiment_id={m.source_experiment_id}"
    )
specific_model = lore.get_model(models[0].id)
By Name #
model = lore.get_model_by_name("model-name")
model = lore.get_model_by_name(
    model_name="meta-llama/Llama-2-7b-hf",
    is_base_model=True
)
Load Models #
llama2_base = lore.get_base_model("meta-llama/Llama-2-7b-hf")
lore.load_model(
    llama2_base,
    hf_token=HF_TOKEN,
)
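In the call above, HF_TOKEN is assumed to already be defined; gated repositories such as meta-llama/Llama-2-7b-hf require an authenticated Hugging Face token. One option (an assumption, not a requirement of the SDK) is to read it from an environment variable before calling lore.load_model:
import os

# Read the Hugging Face access token from the environment instead of
# hard-coding it in the script
HF_TOKEN = os.environ["HF_TOKEN"]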
Upload Models #
To Hugging Face #
uploading_model = lore.get_model_by_name("model-name")
hf_repo_owner = "your_hf_username"
hf_repo_name = "your_model_repo"
hf_token = "your_hf_auth_token"
private = False # Set to True to create the Hugging Face repository as private
# Upload the model to Hugging Face
experiment_id = lore.upload_model_to_hf(
    model=uploading_model,
    hf_repo_owner=hf_repo_owner,
    hf_repo_name=hf_repo_name,
    hf_token=hf_token,
    private=private
)
Download Models #
model_id = 123 # Replace with the actual ID of the model you wish to download
output_path = "/path/to/save/model" # Replace with the desired path to save the downloaded model
mlde_host = "http://your.mlde.host" # Replace with the actual MLDE host address, if needed
# Download the model
downloaded_model_path = lore.download_model(
    model_id=model_id,
    output_path=output_path,
    mlde_host=mlde_host  # Optional; can be omitted if not required
)
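The variable name suggests download_model returns the local path of the downloaded model; assuming that is the case, you can verify the files landed where you expect:
import os

# List the contents of the download location to confirm the model files exist
print(os.listdir(downloaded_model_path))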
Delete Models #
my_old_model = lore.get_model_by_name("model-name")
lore.delete_model(my_old_model.id)