Creates a training job with the specified configuration.
curl --request POST \
--url https://api.baseten.co/v1/training_projects/{training_project_id}/jobs \
--header "Authorization: Api-Key $BASETEN_API_KEY" \
--data '{
"training_job": {
"image": {
"base_image": "hello-world",
"docker_auth": null
},
"compute": {
"node_count": 1,
"cpu_count": 1,
"memory": "2Gi",
"accelerator": {
"accelerator": "H100",
"count": 2
}
},
"runtime": {
"start_commands": [
"python main.py"
],
"environment_variables": {
"API_KEY": "your_api_key_here",
"PATH": "/usr/bin"
},
"artifacts": null,
"enable_cache": true,
"cache_config": {
"enable_legacy_hf_mount": true,
"enabled": true,
"mount_base_path": "/root/.cache",
"require_cache_affinity": true
},
"checkpointing_config": {
"enabled": true,
"checkpoint_path": "/mnt/ckpts",
"volume_size_gib": 10
},
"load_checkpoint_config": null
},
"name": "gpt-oss-job",
"truss_user_env": null,
"interactive_session": null,
"weights": [
{
"allow_patterns": null,
"auth_secret_name": null,
"ignore_patterns": null,
"mount_location": "/app/models/base",
"source": "hf://meta-llama/Llama-3-8B@main"
}
]
}
}'{
"training_job": {
"id": "<string>",
"created_at": "2023-11-07T05:31:56Z",
"current_status": "<string>",
"instance_type": {
"id": "<string>",
"name": "<string>",
"memory_limit_mib": 123,
"millicpu_limit": 123,
"gpu_count": 123,
"gpu_type": "<string>",
"gpu_memory_limit_mib": 123
},
"updated_at": "2023-11-07T05:31:56Z",
"training_project_id": "<string>",
"training_project": {
"id": "<string>",
"name": "<string>"
},
"error_message": "<string>",
"name": "gpt-oss-job"
}
}
You must specify the scheme 'Api-Key' in the Authorization header. For example, Authorization: Api-Key <Your_Api_Key>
A request to create a training job.
The training job to create.
Show child attributes
A response to creating a training job.
A response to creating a training job.
The created training job.
Show child attributes
Was this page helpful?
curl --request POST \
--url https://api.baseten.co/v1/training_projects/{training_project_id}/jobs \
--header "Authorization: Api-Key $BASETEN_API_KEY" \
--data '{
"training_job": {
"image": {
"base_image": "hello-world",
"docker_auth": null
},
"compute": {
"node_count": 1,
"cpu_count": 1,
"memory": "2Gi",
"accelerator": {
"accelerator": "H100",
"count": 2
}
},
"runtime": {
"start_commands": [
"python main.py"
],
"environment_variables": {
"API_KEY": "your_api_key_here",
"PATH": "/usr/bin"
},
"artifacts": null,
"enable_cache": true,
"cache_config": {
"enable_legacy_hf_mount": true,
"enabled": true,
"mount_base_path": "/root/.cache",
"require_cache_affinity": true
},
"checkpointing_config": {
"enabled": true,
"checkpoint_path": "/mnt/ckpts",
"volume_size_gib": 10
},
"load_checkpoint_config": null
},
"name": "gpt-oss-job",
"truss_user_env": null,
"interactive_session": null,
"weights": [
{
"allow_patterns": null,
"auth_secret_name": null,
"ignore_patterns": null,
"mount_location": "/app/models/base",
"source": "hf://meta-llama/Llama-3-8B@main"
}
]
}
}'{
"training_job": {
"id": "<string>",
"created_at": "2023-11-07T05:31:56Z",
"current_status": "<string>",
"instance_type": {
"id": "<string>",
"name": "<string>",
"memory_limit_mib": 123,
"millicpu_limit": 123,
"gpu_count": 123,
"gpu_type": "<string>",
"gpu_memory_limit_mib": 123
},
"updated_at": "2023-11-07T05:31:56Z",
"training_project_id": "<string>",
"training_project": {
"id": "<string>",
"name": "<string>"
},
"error_message": "<string>",
"name": "gpt-oss-job"
}
}