@@ -24,3 +24,32 @@ resources:
spark_version: $DEFAULT_SPARK_VERSION
node_type_id: $NODE_TYPE_ID
num_workers: 1

clusters:
test_cluster:
cluster_name: cluster-$UNIQUE_NAME
spark_version: $DEFAULT_SPARK_VERSION
node_type_id: $NODE_TYPE_ID
num_workers: 1

experiments:
test_experiment:
name: /Users/$CURRENT_USER_NAME/experiment-$UNIQUE_NAME

registered_models:
test_model:
name: model_$UNIQUE_NAME
catalog_name: main
schema_name: default

volumes:
test_volume:
name: volume_$UNIQUE_NAME
catalog_name: main
schema_name: default
volume_type: MANAGED

sql_warehouses:
test_warehouse:
name: warehouse-$UNIQUE_NAME
cluster_size: 2X-Small
@@ -39,16 +39,29 @@ Resource: resources.jobs.job_two
+ max_concurrent_runs: 10
tasks:
- task_key: main
@@ -25,2 +27,4 @@
node_type_id: [NODE_TYPE_ID]
@@ -26,4 +28,6 @@
num_workers: 1

+ tags:
+ team: ml
clusters:
test_cluster:

>>> [CLI] bundle destroy --auto-approve
The following resources will be deleted:
delete resources.clusters.test_cluster
delete resources.experiments.test_experiment
delete resources.jobs.job_one
delete resources.jobs.job_two
delete resources.registered_models.test_model
delete resources.sql_warehouses.test_warehouse
delete resources.volumes.test_volume

This action will result in the deletion of the following volumes.
For managed volumes, the files stored in the volume are also deleted from your
cloud tenant within 30 days. For external volumes, the metadata about the volume
is removed from the catalog, but the underlying files are not deleted:
delete resources.volumes.test_volume

All files and directories at the following location will be deleted: /Workspace/Users/[USERNAME]/.bundle/test-bundle-[UNIQUE_NAME]/default

@@ -3,6 +3,9 @@
envsubst < databricks.yml.tmpl > databricks.yml

cleanup() {
# Restore original config before destroy to avoid Terraform errors
# from server-side-only fields (e.g. creator_name) written by config-remote-sync.
envsubst < databricks.yml.tmpl > databricks.yml
trace $CLI bundle destroy --auto-approve
}
trap cleanup EXIT
@@ -13,6 +16,9 @@ $CLI bundle deploy
job_one_id="$(read_id.py job_one)"
job_two_id="$(read_id.py job_two)"

# Add replacements for dynamic values that appear in server-side defaults
add_repl.py "$($CLI current-user me | jq -r .id)" "USER_ID"
add_repl.py "$($CLI metastores current | jq -r .metastore_id)" "METASTORE_ID"

title "Modify both jobs"
edit_resource.py jobs $job_one_id <<EOF
@@ -38,8 +38,15 @@
},
"remote_state": {
"autotermination_minutes": 60,
"aws_attributes": {
"availability": "SPOT_WITH_FALLBACK",
"zone_id": "us-east-1c"
},
"cluster_id": "[CLUSTER_ID]",
"cluster_name": "test-cluster-[UNIQUE_NAME]",
"data_security_mode": "SINGLE_USER",
"driver_node_type_id": "[NODE_TYPE_ID]",
"enable_elastic_disk": false,
"node_type_id": "[NODE_TYPE_ID]",
"num_workers": 2,
"spark_version": "13.3.x-snapshot-scala2.12"
@@ -52,6 +59,29 @@
"min_workers": 2
}
},
"aws_attributes": {
"action": "skip",
"reason": "server_side_default",
"remote": {
"availability": "SPOT_WITH_FALLBACK",
"zone_id": "us-east-1c"
}
},
"data_security_mode": {
"action": "skip",
"reason": "server_side_default",
"remote": "SINGLE_USER"
},
"driver_node_type_id": {
"action": "skip",
"reason": "server_side_default",
"remote": "[NODE_TYPE_ID]"
},
"enable_elastic_disk": {
"action": "skip",
"reason": "empty",
"remote": false
},
"num_workers": {
"action": "update",
"old": 2,
@@ -87,8 +117,15 @@
"min_workers": 2
},
"autotermination_minutes": 60,
"aws_attributes": {
"availability": "SPOT_WITH_FALLBACK",
"zone_id": "us-east-1c"
},
"cluster_id": "[CLUSTER_ID]",
"cluster_name": "test-cluster-[UNIQUE_NAME]",
"data_security_mode": "SINGLE_USER",
"driver_node_type_id": "[NODE_TYPE_ID]",
"enable_elastic_disk": false,
"node_type_id": "[NODE_TYPE_ID]",
"spark_version": "13.3.x-snapshot-scala2.12"
},
@@ -104,6 +141,29 @@
"old": 2,
"new": 3,
"remote": 2
},
"aws_attributes": {
"action": "skip",
"reason": "server_side_default",
"remote": {
"availability": "SPOT_WITH_FALLBACK",
"zone_id": "us-east-1c"
}
},
"data_security_mode": {
"action": "skip",
"reason": "server_side_default",
"remote": "SINGLE_USER"
},
"driver_node_type_id": {
"action": "skip",
"reason": "server_side_default",
"remote": "[NODE_TYPE_ID]"
},
"enable_elastic_disk": {
"action": "skip",
"reason": "empty",
"remote": false
}
}
}
@@ -135,8 +195,15 @@
"min_workers": 3
},
"autotermination_minutes": 60,
"aws_attributes": {
"availability": "SPOT_WITH_FALLBACK",
"zone_id": "us-east-1c"
},
"cluster_id": "[CLUSTER_ID]",
"cluster_name": "test-cluster-[UNIQUE_NAME]",
"data_security_mode": "SINGLE_USER",
"driver_node_type_id": "[NODE_TYPE_ID]",
"enable_elastic_disk": false,
"node_type_id": "[NODE_TYPE_ID]",
"spark_version": "13.3.x-snapshot-scala2.12",
"state": "RUNNING"
@@ -155,6 +222,29 @@
"old": 3,
"new": 4,
"remote": 3
},
"aws_attributes": {
"action": "skip",
"reason": "server_side_default",
"remote": {
"availability": "SPOT_WITH_FALLBACK",
"zone_id": "us-east-1c"
}
},
"data_security_mode": {
"action": "skip",
"reason": "server_side_default",
"remote": "SINGLE_USER"
},
"driver_node_type_id": {
"action": "skip",
"reason": "server_side_default",
"remote": "[NODE_TYPE_ID]"
},
"enable_elastic_disk": {
"action": "skip",
"reason": "empty",
"remote": false
}
}
}
@@ -183,8 +273,15 @@
"min_workers": 4
},
"autotermination_minutes": 60,
"aws_attributes": {
"availability": "SPOT_WITH_FALLBACK",
"zone_id": "us-east-1c"
},
"cluster_id": "[CLUSTER_ID]",
"cluster_name": "test-cluster-[UNIQUE_NAME]",
"data_security_mode": "SINGLE_USER",
"driver_node_type_id": "[NODE_TYPE_ID]",
"enable_elastic_disk": false,
"node_type_id": "[NODE_TYPE_ID]",
"spark_version": "13.3.x-snapshot-scala2.12",
"state": "RUNNING"
@@ -202,6 +299,29 @@
"min_workers": 4
}
},
"aws_attributes": {
"action": "skip",
"reason": "server_side_default",
"remote": {
"availability": "SPOT_WITH_FALLBACK",
"zone_id": "us-east-1c"
}
},
"data_security_mode": {
"action": "skip",
"reason": "server_side_default",
"remote": "SINGLE_USER"
},
"driver_node_type_id": {
"action": "skip",
"reason": "server_side_default",
"remote": "[NODE_TYPE_ID]"
},
"enable_elastic_disk": {
"action": "skip",
"reason": "empty",
"remote": false
},
"num_workers": {
"action": "resize",
"reason": "custom",
@@ -20,7 +20,7 @@ Deploying resources...
Updating deployment state...
Deployment complete!

>>> jq select(.method == "POST" and (.path | contains("/clusters/edit"))) out.requests.txt
>>> jq select(.method == "POST" and (.path | contains("/clusters/edit"))) | del(.body.aws_attributes, .body.driver_node_type_id, .body.data_security_mode, .body.enable_elastic_disk) out.requests.txt
{
"method": "POST",
"path": "/api/2.1/clusters/edit",
@@ -55,7 +55,7 @@ Deploying resources...
Updating deployment state...
Deployment complete!

>>> jq select(.method == "POST" and (.path | contains("/clusters/edit"))) out.requests.txt
>>> jq select(.method == "POST" and (.path | contains("/clusters/edit"))) | del(.body.aws_attributes, .body.driver_node_type_id, .body.data_security_mode, .body.enable_elastic_disk) out.requests.txt
{
"method": "POST",
"path": "/api/2.1/clusters/edit",
@@ -89,8 +89,15 @@ Deployment complete!
"min_workers":3
},
"autotermination_minutes":60,
"aws_attributes": {
"availability":"SPOT_WITH_FALLBACK",
"zone_id":"us-east-1c"
},
"cluster_id":"[CLUSTER_ID]",
"cluster_name":"test-cluster-[UNIQUE_NAME]",
"data_security_mode":"SINGLE_USER",
"driver_node_type_id":"[NODE_TYPE_ID]",
"enable_elastic_disk":false,
"node_type_id":"[NODE_TYPE_ID]",
"spark_version":"13.3.x-snapshot-scala2.12",
"state":"RUNNING"
@@ -22,7 +22,7 @@ update_file.py databricks.yml " num_workers: 2" " autoscale:
$CLI bundle plan >> out.plan_.$DATABRICKS_BUNDLE_ENGINE.txt
$CLI bundle plan -o json >> out.plan_.$DATABRICKS_BUNDLE_ENGINE.json
trace $CLI bundle deploy
trace jq 'select(.method == "POST" and (.path | contains("/clusters/edit")))' out.requests.txt
trace jq 'select(.method == "POST" and (.path | contains("/clusters/edit"))) | del(.body.aws_attributes, .body.driver_node_type_id, .body.data_security_mode, .body.enable_elastic_disk)' out.requests.txt
rm out.requests.txt

title "Cluster should have autoscale\n"
@@ -34,7 +34,7 @@ update_file.py databricks.yml "max_workers: 4" "max_workers: 5"
$CLI bundle plan >> out.plan_.$DATABRICKS_BUNDLE_ENGINE.txt
$CLI bundle plan -o json >> out.plan_.$DATABRICKS_BUNDLE_ENGINE.json
trace $CLI bundle deploy
trace jq 'select(.method == "POST" and (.path | contains("/clusters/edit")))' out.requests.txt
trace jq 'select(.method == "POST" and (.path | contains("/clusters/edit"))) | del(.body.aws_attributes, .body.driver_node_type_id, .body.data_security_mode, .body.enable_elastic_disk)' out.requests.txt
rm out.requests.txt

title "Cluster should have new autoscale\n"