Databricks v1.63.0 published on Thursday, Mar 13, 2025 by Pulumi

databricks.Job


The databricks.Job resource allows you to manage Databricks Jobs that run non-interactive code on a databricks.Cluster.

Example Usage

In Pulumi configuration, it is recommended to define tasks in alphabetical order of their task_key arguments, so that you get a consistent and readable diff. Whenever tasks are added or removed, or a task_key is renamed, you will observe a change in the majority of tasks. This is because the current version of the provider treats task blocks as an ordered list. Alternatively, the task block could have been modeled as an unordered set, but then end users would see the entire block replaced upon a change in a single property of a task.

It is possible to create a Databricks job using task blocks. A single task is defined with the task block containing one of the *_task blocks, task_key, and additional arguments described below.

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";
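// Note: `latest`, `smallest`, `shared`, `thisDatabricksNotebook`, and `thisDatabricksPipeline`
// are assumed to be defined elsewhere in the program, e.g. via databricks.getSparkVersion and
// databricks.getNodeType lookups, an existing databricks.Cluster, a databricks.Notebook, and a
// databricks.Pipeline.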

const _this = new databricks.Job("this", {
    name: "Job with multiple tasks",
    description: "This job executes multiple tasks on a shared job cluster, which will be provisioned as part of execution, and terminated once all tasks are finished.",
    jobClusters: [{
        jobClusterKey: "j",
        newCluster: {
            numWorkers: 2,
            sparkVersion: latest.id,
            nodeTypeId: smallest.id,
        },
    }],
    tasks: [
        {
            taskKey: "a",
            newCluster: {
                numWorkers: 1,
                sparkVersion: latest.id,
                nodeTypeId: smallest.id,
            },
            notebookTask: {
                notebookPath: thisDatabricksNotebook.path,
            },
        },
        {
            taskKey: "b",
            dependsOns: [{
                taskKey: "a",
            }],
            existingClusterId: shared.id,
            sparkJarTask: {
                mainClassName: "com.acme.data.Main",
            },
        },
        {
            taskKey: "c",
            jobClusterKey: "j",
            notebookTask: {
                notebookPath: thisDatabricksNotebook.path,
            },
        },
        {
            taskKey: "d",
            pipelineTask: {
                pipelineId: thisDatabricksPipeline.id,
            },
        },
    ],
});
import pulumi
import pulumi_databricks as databricks

this = databricks.Job("this",
    name="Job with multiple tasks",
    description="This job executes multiple tasks on a shared job cluster, which will be provisioned as part of execution, and terminated once all tasks are finished.",
    job_clusters=[{
        "job_cluster_key": "j",
        "new_cluster": {
            "num_workers": 2,
            "spark_version": latest["id"],
            "node_type_id": smallest["id"],
        },
    }],
    tasks=[
        {
            "task_key": "a",
            "new_cluster": {
                "num_workers": 1,
                "spark_version": latest["id"],
                "node_type_id": smallest["id"],
            },
            "notebook_task": {
                "notebook_path": this_databricks_notebook["path"],
            },
        },
        {
            "task_key": "b",
            "depends_ons": [{
                "task_key": "a",
            }],
            "existing_cluster_id": shared["id"],
            "spark_jar_task": {
                "main_class_name": "com.acme.data.Main",
            },
        },
        {
            "task_key": "c",
            "job_cluster_key": "j",
            "notebook_task": {
                "notebook_path": this_databricks_notebook["path"],
            },
        },
        {
            "task_key": "d",
            "pipeline_task": {
                "pipeline_id": this_databricks_pipeline["id"],
            },
        },
    ])
package main

import (
	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := databricks.NewJob(ctx, "this", &databricks.JobArgs{
			Name:        pulumi.String("Job with multiple tasks"),
			Description: pulumi.String("This job executes multiple tasks on a shared job cluster, which will be provisioned as part of execution, and terminated once all tasks are finished."),
			JobClusters: databricks.JobJobClusterArray{
				&databricks.JobJobClusterArgs{
					JobClusterKey: pulumi.String("j"),
					NewCluster: &databricks.JobJobClusterNewClusterArgs{
						NumWorkers:   pulumi.Int(2),
						SparkVersion: pulumi.Any(latest.Id),
						NodeTypeId:   pulumi.Any(smallest.Id),
					},
				},
			},
			Tasks: databricks.JobTaskArray{
				&databricks.JobTaskArgs{
					TaskKey: pulumi.String("a"),
					NewCluster: &databricks.JobTaskNewClusterArgs{
						NumWorkers:   pulumi.Int(1),
						SparkVersion: pulumi.Any(latest.Id),
						NodeTypeId:   pulumi.Any(smallest.Id),
					},
					NotebookTask: &databricks.JobTaskNotebookTaskArgs{
						NotebookPath: pulumi.Any(thisDatabricksNotebook.Path),
					},
				},
				&databricks.JobTaskArgs{
					TaskKey: pulumi.String("b"),
					DependsOns: databricks.JobTaskDependsOnArray{
						&databricks.JobTaskDependsOnArgs{
							TaskKey: pulumi.String("a"),
						},
					},
					ExistingClusterId: pulumi.Any(shared.Id),
					SparkJarTask: &databricks.JobTaskSparkJarTaskArgs{
						MainClassName: pulumi.String("com.acme.data.Main"),
					},
				},
				&databricks.JobTaskArgs{
					TaskKey:       pulumi.String("c"),
					JobClusterKey: pulumi.String("j"),
					NotebookTask: &databricks.JobTaskNotebookTaskArgs{
						NotebookPath: pulumi.Any(thisDatabricksNotebook.Path),
					},
				},
				&databricks.JobTaskArgs{
					TaskKey: pulumi.String("d"),
					PipelineTask: &databricks.JobTaskPipelineTaskArgs{
						PipelineId: pulumi.Any(thisDatabricksPipeline.Id),
					},
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;

return await Deployment.RunAsync(() => 
{
    var @this = new Databricks.Job("this", new()
    {
        Name = "Job with multiple tasks",
        Description = "This job executes multiple tasks on a shared job cluster, which will be provisioned as part of execution, and terminated once all tasks are finished.",
        JobClusters = new[]
        {
            new Databricks.Inputs.JobJobClusterArgs
            {
                JobClusterKey = "j",
                NewCluster = new Databricks.Inputs.JobJobClusterNewClusterArgs
                {
                    NumWorkers = 2,
                    SparkVersion = latest.Id,
                    NodeTypeId = smallest.Id,
                },
            },
        },
        Tasks = new[]
        {
            new Databricks.Inputs.JobTaskArgs
            {
                TaskKey = "a",
                NewCluster = new Databricks.Inputs.JobTaskNewClusterArgs
                {
                    NumWorkers = 1,
                    SparkVersion = latest.Id,
                    NodeTypeId = smallest.Id,
                },
                NotebookTask = new Databricks.Inputs.JobTaskNotebookTaskArgs
                {
                    NotebookPath = thisDatabricksNotebook.Path,
                },
            },
            new Databricks.Inputs.JobTaskArgs
            {
                TaskKey = "b",
                DependsOns = new[]
                {
                    new Databricks.Inputs.JobTaskDependsOnArgs
                    {
                        TaskKey = "a",
                    },
                },
                ExistingClusterId = shared.Id,
                SparkJarTask = new Databricks.Inputs.JobTaskSparkJarTaskArgs
                {
                    MainClassName = "com.acme.data.Main",
                },
            },
            new Databricks.Inputs.JobTaskArgs
            {
                TaskKey = "c",
                JobClusterKey = "j",
                NotebookTask = new Databricks.Inputs.JobTaskNotebookTaskArgs
                {
                    NotebookPath = thisDatabricksNotebook.Path,
                },
            },
            new Databricks.Inputs.JobTaskArgs
            {
                TaskKey = "d",
                PipelineTask = new Databricks.Inputs.JobTaskPipelineTaskArgs
                {
                    PipelineId = thisDatabricksPipeline.Id,
                },
            },
        },
    });

});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.Job;
import com.pulumi.databricks.JobArgs;
import com.pulumi.databricks.inputs.JobJobClusterArgs;
import com.pulumi.databricks.inputs.JobJobClusterNewClusterArgs;
import com.pulumi.databricks.inputs.JobTaskArgs;
import com.pulumi.databricks.inputs.JobTaskDependsOnArgs;
import com.pulumi.databricks.inputs.JobTaskNewClusterArgs;
import com.pulumi.databricks.inputs.JobTaskNotebookTaskArgs;
import com.pulumi.databricks.inputs.JobTaskSparkJarTaskArgs;
import com.pulumi.databricks.inputs.JobTaskPipelineTaskArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var this_ = new Job("this", JobArgs.builder()
            .name("Job with multiple tasks")
            .description("This job executes multiple tasks on a shared job cluster, which will be provisioned as part of execution, and terminated once all tasks are finished.")
            .jobClusters(JobJobClusterArgs.builder()
                .jobClusterKey("j")
                .newCluster(JobJobClusterNewClusterArgs.builder()
                    .numWorkers(2)
                    .sparkVersion(latest.id())
                    .nodeTypeId(smallest.id())
                    .build())
                .build())
            .tasks(            
                JobTaskArgs.builder()
                    .taskKey("a")
                    .newCluster(JobTaskNewClusterArgs.builder()
                        .numWorkers(1)
                        .sparkVersion(latest.id())
                        .nodeTypeId(smallest.id())
                        .build())
                    .notebookTask(JobTaskNotebookTaskArgs.builder()
                        .notebookPath(thisDatabricksNotebook.path())
                        .build())
                    .build(),
                JobTaskArgs.builder()
                    .taskKey("b")
                    .dependsOns(JobTaskDependsOnArgs.builder()
                        .taskKey("a")
                        .build())
                    .existingClusterId(shared.id())
                    .sparkJarTask(JobTaskSparkJarTaskArgs.builder()
                        .mainClassName("com.acme.data.Main")
                        .build())
                    .build(),
                JobTaskArgs.builder()
                    .taskKey("c")
                    .jobClusterKey("j")
                    .notebookTask(JobTaskNotebookTaskArgs.builder()
                        .notebookPath(thisDatabricksNotebook.path())
                        .build())
                    .build(),
                JobTaskArgs.builder()
                    .taskKey("d")
                    .pipelineTask(JobTaskPipelineTaskArgs.builder()
                        .pipelineId(thisDatabricksPipeline.id())
                        .build())
                    .build())
            .build());

    }
}
resources:
  this:
    type: databricks:Job
    properties:
      name: Job with multiple tasks
      description: This job executes multiple tasks on a shared job cluster, which will be provisioned as part of execution, and terminated once all tasks are finished.
      jobClusters:
        - jobClusterKey: j
          newCluster:
            numWorkers: 2
            sparkVersion: ${latest.id}
            nodeTypeId: ${smallest.id}
      tasks:
        - taskKey: a
          newCluster:
            numWorkers: 1
            sparkVersion: ${latest.id}
            nodeTypeId: ${smallest.id}
          notebookTask:
            notebookPath: ${thisDatabricksNotebook.path}
        - taskKey: b
          dependsOns:
            - taskKey: a
          existingClusterId: ${shared.id}
          sparkJarTask:
            mainClassName: com.acme.data.Main
        - taskKey: c
          jobClusterKey: j
          notebookTask:
            notebookPath: ${thisDatabricksNotebook.path}
        - taskKey: d
          pipelineTask:
            pipelineId: ${thisDatabricksPipeline.id}

Access Control

By default, all users can create and modify jobs unless an administrator enables jobs access control. With jobs access control, individual permissions determine a user’s abilities.

  • databricks.Permissions can control which groups or individual users are granted the Can View, Can Manage Run, and Can Manage permissions (a minimal sketch follows this list).
  • databricks.ClusterPolicy can control which kinds of clusters users can create for jobs.
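
A minimal sketch of job access control, assuming the job defined in the example above and placeholder group names that would need to exist in your workspace:

import * as databricks from "@pulumi/databricks";

// Grants read-only access to one group and run/manage access to another.
// "Data Analysts" and "Data Engineers" are placeholder group names; `_this` is the
// databricks.Job resource created in the example above.
const jobUsage = new databricks.Permissions("job-usage", {
    jobId: _this.id,
    accessControls: [
        {
            groupName: "Data Analysts",
            permissionLevel: "CAN_VIEW",
        },
        {
            groupName: "Data Engineers",
            permissionLevel: "CAN_MANAGE_RUN",
        },
    ],
});

The permission levels correspond to the Can View, Can Manage Run, and Can Manage abilities mentioned above.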

Create Job Resource

Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

Constructor syntax

new Job(name: string, args?: JobArgs, opts?: CustomResourceOptions);
@overload
def Job(resource_name: str,
        args: Optional[JobArgs] = None,
        opts: Optional[ResourceOptions] = None)

@overload
def Job(resource_name: str,
        opts: Optional[ResourceOptions] = None,
        always_running: Optional[bool] = None,
        budget_policy_id: Optional[str] = None,
        continuous: Optional[JobContinuousArgs] = None,
        control_run_state: Optional[bool] = None,
        dbt_task: Optional[JobDbtTaskArgs] = None,
        deployment: Optional[JobDeploymentArgs] = None,
        description: Optional[str] = None,
        edit_mode: Optional[str] = None,
        email_notifications: Optional[JobEmailNotificationsArgs] = None,
        environments: Optional[Sequence[JobEnvironmentArgs]] = None,
        existing_cluster_id: Optional[str] = None,
        format: Optional[str] = None,
        git_source: Optional[JobGitSourceArgs] = None,
        health: Optional[JobHealthArgs] = None,
        job_clusters: Optional[Sequence[JobJobClusterArgs]] = None,
        libraries: Optional[Sequence[JobLibraryArgs]] = None,
        max_concurrent_runs: Optional[int] = None,
        max_retries: Optional[int] = None,
        min_retry_interval_millis: Optional[int] = None,
        name: Optional[str] = None,
        new_cluster: Optional[JobNewClusterArgs] = None,
        notebook_task: Optional[JobNotebookTaskArgs] = None,
        notification_settings: Optional[JobNotificationSettingsArgs] = None,
        parameters: Optional[Sequence[JobParameterArgs]] = None,
        performance_target: Optional[str] = None,
        pipeline_task: Optional[JobPipelineTaskArgs] = None,
        python_wheel_task: Optional[JobPythonWheelTaskArgs] = None,
        queue: Optional[JobQueueArgs] = None,
        retry_on_timeout: Optional[bool] = None,
        run_as: Optional[JobRunAsArgs] = None,
        run_job_task: Optional[JobRunJobTaskArgs] = None,
        schedule: Optional[JobScheduleArgs] = None,
        spark_jar_task: Optional[JobSparkJarTaskArgs] = None,
        spark_python_task: Optional[JobSparkPythonTaskArgs] = None,
        spark_submit_task: Optional[JobSparkSubmitTaskArgs] = None,
        tags: Optional[Mapping[str, str]] = None,
        tasks: Optional[Sequence[JobTaskArgs]] = None,
        timeout_seconds: Optional[int] = None,
        trigger: Optional[JobTriggerArgs] = None,
        webhook_notifications: Optional[JobWebhookNotificationsArgs] = None)
func NewJob(ctx *Context, name string, args *JobArgs, opts ...ResourceOption) (*Job, error)
public Job(string name, JobArgs? args = null, CustomResourceOptions? opts = null)
public Job(String name, JobArgs args)
public Job(String name, JobArgs args, CustomResourceOptions options)
type: databricks:Job
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.

Parameters

TypeScript / JavaScript
  • name (string, required): The unique name of the resource.
  • args (JobArgs): The arguments to resource properties.
  • opts (CustomResourceOptions): Bag of options to control resource's behavior.

Python
  • resource_name (str, required): The unique name of the resource.
  • args (JobArgs): The arguments to resource properties.
  • opts (ResourceOptions): Bag of options to control resource's behavior.

Go
  • ctx (Context): Context object for the current deployment.
  • name (string, required): The unique name of the resource.
  • args (JobArgs): The arguments to resource properties.
  • opts (ResourceOption): Bag of options to control resource's behavior.

C#
  • name (string, required): The unique name of the resource.
  • args (JobArgs): The arguments to resource properties.
  • opts (CustomResourceOptions): Bag of options to control resource's behavior.

Java
  • name (String, required): The unique name of the resource.
  • args (JobArgs, required): The arguments to resource properties.
  • options (CustomResourceOptions): Bag of options to control resource's behavior.

Constructor example

The following reference example uses placeholder values for all input properties.

var jobResource = new Databricks.Job("jobResource", new()
{
    BudgetPolicyId = "string",
    Continuous = new Databricks.Inputs.JobContinuousArgs
    {
        PauseStatus = "string",
    },
    ControlRunState = false,
    Deployment = new Databricks.Inputs.JobDeploymentArgs
    {
        Kind = "string",
        MetadataFilePath = "string",
    },
    Description = "string",
    EditMode = "string",
    EmailNotifications = new Databricks.Inputs.JobEmailNotificationsArgs
    {
        NoAlertForSkippedRuns = false,
        OnDurationWarningThresholdExceededs = new[]
        {
            "string",
        },
        OnFailures = new[]
        {
            "string",
        },
        OnStarts = new[]
        {
            "string",
        },
        OnStreamingBacklogExceededs = new[]
        {
            "string",
        },
        OnSuccesses = new[]
        {
            "string",
        },
    },
    Environments = new[]
    {
        new Databricks.Inputs.JobEnvironmentArgs
        {
            EnvironmentKey = "string",
            Spec = new Databricks.Inputs.JobEnvironmentSpecArgs
            {
                Client = "string",
                Dependencies = new[]
                {
                    "string",
                },
            },
        },
    },
    ExistingClusterId = "string",
    Format = "string",
    GitSource = new Databricks.Inputs.JobGitSourceArgs
    {
        Url = "string",
        Branch = "string",
        Commit = "string",
        GitSnapshot = new Databricks.Inputs.JobGitSourceGitSnapshotArgs
        {
            UsedCommit = "string",
        },
        JobSource = new Databricks.Inputs.JobGitSourceJobSourceArgs
        {
            ImportFromGitBranch = "string",
            JobConfigPath = "string",
            DirtyState = "string",
        },
        Provider = "string",
        Tag = "string",
    },
    Health = new Databricks.Inputs.JobHealthArgs
    {
        Rules = new[]
        {
            new Databricks.Inputs.JobHealthRuleArgs
            {
                Metric = "string",
                Op = "string",
                Value = 0,
            },
        },
    },
    JobClusters = new[]
    {
        new Databricks.Inputs.JobJobClusterArgs
        {
            JobClusterKey = "string",
            NewCluster = new Databricks.Inputs.JobJobClusterNewClusterArgs
            {
                SparkVersion = "string",
                IdempotencyToken = "string",
                SshPublicKeys = new[]
                {
                    "string",
                },
                AzureAttributes = new Databricks.Inputs.JobJobClusterNewClusterAzureAttributesArgs
                {
                    Availability = "string",
                    FirstOnDemand = 0,
                    LogAnalyticsInfo = new Databricks.Inputs.JobJobClusterNewClusterAzureAttributesLogAnalyticsInfoArgs
                    {
                        LogAnalyticsPrimaryKey = "string",
                        LogAnalyticsWorkspaceId = "string",
                    },
                    SpotBidMaxPrice = 0,
                },
                ClusterId = "string",
                ClusterLogConf = new Databricks.Inputs.JobJobClusterNewClusterClusterLogConfArgs
                {
                    Dbfs = new Databricks.Inputs.JobJobClusterNewClusterClusterLogConfDbfsArgs
                    {
                        Destination = "string",
                    },
                    S3 = new Databricks.Inputs.JobJobClusterNewClusterClusterLogConfS3Args
                    {
                        Destination = "string",
                        CannedAcl = "string",
                        EnableEncryption = false,
                        EncryptionType = "string",
                        Endpoint = "string",
                        KmsKey = "string",
                        Region = "string",
                    },
                    Volumes = new Databricks.Inputs.JobJobClusterNewClusterClusterLogConfVolumesArgs
                    {
                        Destination = "string",
                    },
                },
                ClusterMountInfos = new[]
                {
                    new Databricks.Inputs.JobJobClusterNewClusterClusterMountInfoArgs
                    {
                        LocalMountDirPath = "string",
                        NetworkFilesystemInfo = new Databricks.Inputs.JobJobClusterNewClusterClusterMountInfoNetworkFilesystemInfoArgs
                        {
                            ServerAddress = "string",
                            MountOptions = "string",
                        },
                        RemoteMountDirPath = "string",
                    },
                },
                InitScripts = new[]
                {
                    new Databricks.Inputs.JobJobClusterNewClusterInitScriptArgs
                    {
                        Abfss = new Databricks.Inputs.JobJobClusterNewClusterInitScriptAbfssArgs
                        {
                            Destination = "string",
                        },
                        File = new Databricks.Inputs.JobJobClusterNewClusterInitScriptFileArgs
                        {
                            Destination = "string",
                        },
                        Gcs = new Databricks.Inputs.JobJobClusterNewClusterInitScriptGcsArgs
                        {
                            Destination = "string",
                        },
                        S3 = new Databricks.Inputs.JobJobClusterNewClusterInitScriptS3Args
                        {
                            Destination = "string",
                            CannedAcl = "string",
                            EnableEncryption = false,
                            EncryptionType = "string",
                            Endpoint = "string",
                            KmsKey = "string",
                            Region = "string",
                        },
                        Volumes = new Databricks.Inputs.JobJobClusterNewClusterInitScriptVolumesArgs
                        {
                            Destination = "string",
                        },
                        Workspace = new Databricks.Inputs.JobJobClusterNewClusterInitScriptWorkspaceArgs
                        {
                            Destination = "string",
                        },
                    },
                },
                CustomTags = 
                {
                    { "string", "string" },
                },
                DataSecurityMode = "string",
                DockerImage = new Databricks.Inputs.JobJobClusterNewClusterDockerImageArgs
                {
                    Url = "string",
                    BasicAuth = new Databricks.Inputs.JobJobClusterNewClusterDockerImageBasicAuthArgs
                    {
                        Password = "string",
                        Username = "string",
                    },
                },
                DriverInstancePoolId = "string",
                DriverNodeTypeId = "string",
                EnableElasticDisk = false,
                EnableLocalDiskEncryption = false,
                WorkloadType = new Databricks.Inputs.JobJobClusterNewClusterWorkloadTypeArgs
                {
                    Clients = new Databricks.Inputs.JobJobClusterNewClusterWorkloadTypeClientsArgs
                    {
                        Jobs = false,
                        Notebooks = false,
                    },
                },
                AwsAttributes = new Databricks.Inputs.JobJobClusterNewClusterAwsAttributesArgs
                {
                    Availability = "string",
                    EbsVolumeCount = 0,
                    EbsVolumeIops = 0,
                    EbsVolumeSize = 0,
                    EbsVolumeThroughput = 0,
                    EbsVolumeType = "string",
                    FirstOnDemand = 0,
                    InstanceProfileArn = "string",
                    SpotBidPricePercent = 0,
                    ZoneId = "string",
                },
                ClusterName = "string",
                InstancePoolId = "string",
                IsSingleNode = false,
                Kind = "string",
                Libraries = new[]
                {
                    new Databricks.Inputs.JobJobClusterNewClusterLibraryArgs
                    {
                        Cran = new Databricks.Inputs.JobJobClusterNewClusterLibraryCranArgs
                        {
                            Package = "string",
                            Repo = "string",
                        },
                        Egg = "string",
                        Jar = "string",
                        Maven = new Databricks.Inputs.JobJobClusterNewClusterLibraryMavenArgs
                        {
                            Coordinates = "string",
                            Exclusions = new[]
                            {
                                "string",
                            },
                            Repo = "string",
                        },
                        Pypi = new Databricks.Inputs.JobJobClusterNewClusterLibraryPypiArgs
                        {
                            Package = "string",
                            Repo = "string",
                        },
                        Requirements = "string",
                        Whl = "string",
                    },
                },
                NodeTypeId = "string",
                NumWorkers = 0,
                PolicyId = "string",
                RuntimeEngine = "string",
                SingleUserName = "string",
                SparkConf = 
                {
                    { "string", "string" },
                },
                SparkEnvVars = 
                {
                    { "string", "string" },
                },
                Autoscale = new Databricks.Inputs.JobJobClusterNewClusterAutoscaleArgs
                {
                    MaxWorkers = 0,
                    MinWorkers = 0,
                },
                ApplyPolicyDefaultValues = false,
                UseMlRuntime = false,
                GcpAttributes = new Databricks.Inputs.JobJobClusterNewClusterGcpAttributesArgs
                {
                    Availability = "string",
                    BootDiskSize = 0,
                    GoogleServiceAccount = "string",
                    LocalSsdCount = 0,
                    UsePreemptibleExecutors = false,
                    ZoneId = "string",
                },
            },
        },
    },
    Libraries = new[]
    {
        new Databricks.Inputs.JobLibraryArgs
        {
            Cran = new Databricks.Inputs.JobLibraryCranArgs
            {
                Package = "string",
                Repo = "string",
            },
            Egg = "string",
            Jar = "string",
            Maven = new Databricks.Inputs.JobLibraryMavenArgs
            {
                Coordinates = "string",
                Exclusions = new[]
                {
                    "string",
                },
                Repo = "string",
            },
            Pypi = new Databricks.Inputs.JobLibraryPypiArgs
            {
                Package = "string",
                Repo = "string",
            },
            Requirements = "string",
            Whl = "string",
        },
    },
    MaxConcurrentRuns = 0,
    Name = "string",
    NewCluster = new Databricks.Inputs.JobNewClusterArgs
    {
        SparkVersion = "string",
        IdempotencyToken = "string",
        SshPublicKeys = new[]
        {
            "string",
        },
        AzureAttributes = new Databricks.Inputs.JobNewClusterAzureAttributesArgs
        {
            Availability = "string",
            FirstOnDemand = 0,
            LogAnalyticsInfo = new Databricks.Inputs.JobNewClusterAzureAttributesLogAnalyticsInfoArgs
            {
                LogAnalyticsPrimaryKey = "string",
                LogAnalyticsWorkspaceId = "string",
            },
            SpotBidMaxPrice = 0,
        },
        ClusterId = "string",
        ClusterLogConf = new Databricks.Inputs.JobNewClusterClusterLogConfArgs
        {
            Dbfs = new Databricks.Inputs.JobNewClusterClusterLogConfDbfsArgs
            {
                Destination = "string",
            },
            S3 = new Databricks.Inputs.JobNewClusterClusterLogConfS3Args
            {
                Destination = "string",
                CannedAcl = "string",
                EnableEncryption = false,
                EncryptionType = "string",
                Endpoint = "string",
                KmsKey = "string",
                Region = "string",
            },
            Volumes = new Databricks.Inputs.JobNewClusterClusterLogConfVolumesArgs
            {
                Destination = "string",
            },
        },
        ClusterMountInfos = new[]
        {
            new Databricks.Inputs.JobNewClusterClusterMountInfoArgs
            {
                LocalMountDirPath = "string",
                NetworkFilesystemInfo = new Databricks.Inputs.JobNewClusterClusterMountInfoNetworkFilesystemInfoArgs
                {
                    ServerAddress = "string",
                    MountOptions = "string",
                },
                RemoteMountDirPath = "string",
            },
        },
        InitScripts = new[]
        {
            new Databricks.Inputs.JobNewClusterInitScriptArgs
            {
                Abfss = new Databricks.Inputs.JobNewClusterInitScriptAbfssArgs
                {
                    Destination = "string",
                },
                File = new Databricks.Inputs.JobNewClusterInitScriptFileArgs
                {
                    Destination = "string",
                },
                Gcs = new Databricks.Inputs.JobNewClusterInitScriptGcsArgs
                {
                    Destination = "string",
                },
                S3 = new Databricks.Inputs.JobNewClusterInitScriptS3Args
                {
                    Destination = "string",
                    CannedAcl = "string",
                    EnableEncryption = false,
                    EncryptionType = "string",
                    Endpoint = "string",
                    KmsKey = "string",
                    Region = "string",
                },
                Volumes = new Databricks.Inputs.JobNewClusterInitScriptVolumesArgs
                {
                    Destination = "string",
                },
                Workspace = new Databricks.Inputs.JobNewClusterInitScriptWorkspaceArgs
                {
                    Destination = "string",
                },
            },
        },
        CustomTags = 
        {
            { "string", "string" },
        },
        DataSecurityMode = "string",
        DockerImage = new Databricks.Inputs.JobNewClusterDockerImageArgs
        {
            Url = "string",
            BasicAuth = new Databricks.Inputs.JobNewClusterDockerImageBasicAuthArgs
            {
                Password = "string",
                Username = "string",
            },
        },
        DriverInstancePoolId = "string",
        DriverNodeTypeId = "string",
        EnableElasticDisk = false,
        EnableLocalDiskEncryption = false,
        WorkloadType = new Databricks.Inputs.JobNewClusterWorkloadTypeArgs
        {
            Clients = new Databricks.Inputs.JobNewClusterWorkloadTypeClientsArgs
            {
                Jobs = false,
                Notebooks = false,
            },
        },
        AwsAttributes = new Databricks.Inputs.JobNewClusterAwsAttributesArgs
        {
            Availability = "string",
            EbsVolumeCount = 0,
            EbsVolumeIops = 0,
            EbsVolumeSize = 0,
            EbsVolumeThroughput = 0,
            EbsVolumeType = "string",
            FirstOnDemand = 0,
            InstanceProfileArn = "string",
            SpotBidPricePercent = 0,
            ZoneId = "string",
        },
        ClusterName = "string",
        InstancePoolId = "string",
        IsSingleNode = false,
        Kind = "string",
        Libraries = new[]
        {
            new Databricks.Inputs.JobNewClusterLibraryArgs
            {
                Cran = new Databricks.Inputs.JobNewClusterLibraryCranArgs
                {
                    Package = "string",
                    Repo = "string",
                },
                Egg = "string",
                Jar = "string",
                Maven = new Databricks.Inputs.JobNewClusterLibraryMavenArgs
                {
                    Coordinates = "string",
                    Exclusions = new[]
                    {
                        "string",
                    },
                    Repo = "string",
                },
                Pypi = new Databricks.Inputs.JobNewClusterLibraryPypiArgs
                {
                    Package = "string",
                    Repo = "string",
                },
                Requirements = "string",
                Whl = "string",
            },
        },
        NodeTypeId = "string",
        NumWorkers = 0,
        PolicyId = "string",
        RuntimeEngine = "string",
        SingleUserName = "string",
        SparkConf = 
        {
            { "string", "string" },
        },
        SparkEnvVars = 
        {
            { "string", "string" },
        },
        Autoscale = new Databricks.Inputs.JobNewClusterAutoscaleArgs
        {
            MaxWorkers = 0,
            MinWorkers = 0,
        },
        ApplyPolicyDefaultValues = false,
        UseMlRuntime = false,
        GcpAttributes = new Databricks.Inputs.JobNewClusterGcpAttributesArgs
        {
            Availability = "string",
            BootDiskSize = 0,
            GoogleServiceAccount = "string",
            LocalSsdCount = 0,
            UsePreemptibleExecutors = false,
            ZoneId = "string",
        },
    },
    NotificationSettings = new Databricks.Inputs.JobNotificationSettingsArgs
    {
        NoAlertForCanceledRuns = false,
        NoAlertForSkippedRuns = false,
    },
    Parameters = new[]
    {
        new Databricks.Inputs.JobParameterArgs
        {
            Default = "string",
            Name = "string",
        },
    },
    PerformanceTarget = "string",
    Queue = new Databricks.Inputs.JobQueueArgs
    {
        Enabled = false,
    },
    RunAs = new Databricks.Inputs.JobRunAsArgs
    {
        ServicePrincipalName = "string",
        UserName = "string",
    },
    Schedule = new Databricks.Inputs.JobScheduleArgs
    {
        QuartzCronExpression = "string",
        TimezoneId = "string",
        PauseStatus = "string",
    },
    Tags = 
    {
        { "string", "string" },
    },
    Tasks = new[]
    {
        new Databricks.Inputs.JobTaskArgs
        {
            TaskKey = "string",
            MaxRetries = 0,
            DependsOns = new[]
            {
                new Databricks.Inputs.JobTaskDependsOnArgs
                {
                    TaskKey = "string",
                    Outcome = "string",
                },
            },
            NewCluster = new Databricks.Inputs.JobTaskNewClusterArgs
            {
                SparkVersion = "string",
                IdempotencyToken = "string",
                SshPublicKeys = new[]
                {
                    "string",
                },
                AzureAttributes = new Databricks.Inputs.JobTaskNewClusterAzureAttributesArgs
                {
                    Availability = "string",
                    FirstOnDemand = 0,
                    LogAnalyticsInfo = new Databricks.Inputs.JobTaskNewClusterAzureAttributesLogAnalyticsInfoArgs
                    {
                        LogAnalyticsPrimaryKey = "string",
                        LogAnalyticsWorkspaceId = "string",
                    },
                    SpotBidMaxPrice = 0,
                },
                ClusterId = "string",
                ClusterLogConf = new Databricks.Inputs.JobTaskNewClusterClusterLogConfArgs
                {
                    Dbfs = new Databricks.Inputs.JobTaskNewClusterClusterLogConfDbfsArgs
                    {
                        Destination = "string",
                    },
                    S3 = new Databricks.Inputs.JobTaskNewClusterClusterLogConfS3Args
                    {
                        Destination = "string",
                        CannedAcl = "string",
                        EnableEncryption = false,
                        EncryptionType = "string",
                        Endpoint = "string",
                        KmsKey = "string",
                        Region = "string",
                    },
                    Volumes = new Databricks.Inputs.JobTaskNewClusterClusterLogConfVolumesArgs
                    {
                        Destination = "string",
                    },
                },
                ClusterMountInfos = new[]
                {
                    new Databricks.Inputs.JobTaskNewClusterClusterMountInfoArgs
                    {
                        LocalMountDirPath = "string",
                        NetworkFilesystemInfo = new Databricks.Inputs.JobTaskNewClusterClusterMountInfoNetworkFilesystemInfoArgs
                        {
                            ServerAddress = "string",
                            MountOptions = "string",
                        },
                        RemoteMountDirPath = "string",
                    },
                },
                InitScripts = new[]
                {
                    new Databricks.Inputs.JobTaskNewClusterInitScriptArgs
                    {
                        Abfss = new Databricks.Inputs.JobTaskNewClusterInitScriptAbfssArgs
                        {
                            Destination = "string",
                        },
                        File = new Databricks.Inputs.JobTaskNewClusterInitScriptFileArgs
                        {
                            Destination = "string",
                        },
                        Gcs = new Databricks.Inputs.JobTaskNewClusterInitScriptGcsArgs
                        {
                            Destination = "string",
                        },
                        S3 = new Databricks.Inputs.JobTaskNewClusterInitScriptS3Args
                        {
                            Destination = "string",
                            CannedAcl = "string",
                            EnableEncryption = false,
                            EncryptionType = "string",
                            Endpoint = "string",
                            KmsKey = "string",
                            Region = "string",
                        },
                        Volumes = new Databricks.Inputs.JobTaskNewClusterInitScriptVolumesArgs
                        {
                            Destination = "string",
                        },
                        Workspace = new Databricks.Inputs.JobTaskNewClusterInitScriptWorkspaceArgs
                        {
                            Destination = "string",
                        },
                    },
                },
                CustomTags = 
                {
                    { "string", "string" },
                },
                DataSecurityMode = "string",
                DockerImage = new Databricks.Inputs.JobTaskNewClusterDockerImageArgs
                {
                    Url = "string",
                    BasicAuth = new Databricks.Inputs.JobTaskNewClusterDockerImageBasicAuthArgs
                    {
                        Password = "string",
                        Username = "string",
                    },
                },
                DriverInstancePoolId = "string",
                DriverNodeTypeId = "string",
                EnableElasticDisk = false,
                EnableLocalDiskEncryption = false,
                WorkloadType = new Databricks.Inputs.JobTaskNewClusterWorkloadTypeArgs
                {
                    Clients = new Databricks.Inputs.JobTaskNewClusterWorkloadTypeClientsArgs
                    {
                        Jobs = false,
                        Notebooks = false,
                    },
                },
                AwsAttributes = new Databricks.Inputs.JobTaskNewClusterAwsAttributesArgs
                {
                    Availability = "string",
                    EbsVolumeCount = 0,
                    EbsVolumeIops = 0,
                    EbsVolumeSize = 0,
                    EbsVolumeThroughput = 0,
                    EbsVolumeType = "string",
                    FirstOnDemand = 0,
                    InstanceProfileArn = "string",
                    SpotBidPricePercent = 0,
                    ZoneId = "string",
                },
                ClusterName = "string",
                InstancePoolId = "string",
                IsSingleNode = false,
                Kind = "string",
                Libraries = new[]
                {
                    new Databricks.Inputs.JobTaskNewClusterLibraryArgs
                    {
                        Cran = new Databricks.Inputs.JobTaskNewClusterLibraryCranArgs
                        {
                            Package = "string",
                            Repo = "string",
                        },
                        Egg = "string",
                        Jar = "string",
                        Maven = new Databricks.Inputs.JobTaskNewClusterLibraryMavenArgs
                        {
                            Coordinates = "string",
                            Exclusions = new[]
                            {
                                "string",
                            },
                            Repo = "string",
                        },
                        Pypi = new Databricks.Inputs.JobTaskNewClusterLibraryPypiArgs
                        {
                            Package = "string",
                            Repo = "string",
                        },
                        Requirements = "string",
                        Whl = "string",
                    },
                },
                NodeTypeId = "string",
                NumWorkers = 0,
                PolicyId = "string",
                RuntimeEngine = "string",
                SingleUserName = "string",
                SparkConf = 
                {
                    { "string", "string" },
                },
                SparkEnvVars = 
                {
                    { "string", "string" },
                },
                Autoscale = new Databricks.Inputs.JobTaskNewClusterAutoscaleArgs
                {
                    MaxWorkers = 0,
                    MinWorkers = 0,
                },
                ApplyPolicyDefaultValues = false,
                UseMlRuntime = false,
                GcpAttributes = new Databricks.Inputs.JobTaskNewClusterGcpAttributesArgs
                {
                    Availability = "string",
                    BootDiskSize = 0,
                    GoogleServiceAccount = "string",
                    LocalSsdCount = 0,
                    UsePreemptibleExecutors = false,
                    ZoneId = "string",
                },
            },
            Description = "string",
            DisableAutoOptimization = false,
            EmailNotifications = new Databricks.Inputs.JobTaskEmailNotificationsArgs
            {
                NoAlertForSkippedRuns = false,
                OnDurationWarningThresholdExceededs = new[]
                {
                    "string",
                },
                OnFailures = new[]
                {
                    "string",
                },
                OnStarts = new[]
                {
                    "string",
                },
                OnStreamingBacklogExceededs = new[]
                {
                    "string",
                },
                OnSuccesses = new[]
                {
                    "string",
                },
            },
            EnvironmentKey = "string",
            ExistingClusterId = "string",
            ForEachTask = new Databricks.Inputs.JobTaskForEachTaskArgs
            {
                Inputs = "string",
                Task = new Databricks.Inputs.JobTaskForEachTaskTaskArgs
                {
                    TaskKey = "string",
                    MinRetryIntervalMillis = 0,
                    DisableAutoOptimization = false,
                    NewCluster = new Databricks.Inputs.JobTaskForEachTaskTaskNewClusterArgs
                    {
                        SparkVersion = "string",
                        IdempotencyToken = "string",
                        SshPublicKeys = new[]
                        {
                            "string",
                        },
                        AzureAttributes = new Databricks.Inputs.JobTaskForEachTaskTaskNewClusterAzureAttributesArgs
                        {
                            Availability = "string",
                            FirstOnDemand = 0,
                            LogAnalyticsInfo = new Databricks.Inputs.JobTaskForEachTaskTaskNewClusterAzureAttributesLogAnalyticsInfoArgs
                            {
                                LogAnalyticsPrimaryKey = "string",
                                LogAnalyticsWorkspaceId = "string",
                            },
                            SpotBidMaxPrice = 0,
                        },
                        ClusterId = "string",
                        ClusterLogConf = new Databricks.Inputs.JobTaskForEachTaskTaskNewClusterClusterLogConfArgs
                        {
                            Dbfs = new Databricks.Inputs.JobTaskForEachTaskTaskNewClusterClusterLogConfDbfsArgs
                            {
                                Destination = "string",
                            },
                            S3 = new Databricks.Inputs.JobTaskForEachTaskTaskNewClusterClusterLogConfS3Args
                            {
                                Destination = "string",
                                CannedAcl = "string",
                                EnableEncryption = false,
                                EncryptionType = "string",
                                Endpoint = "string",
                                KmsKey = "string",
                                Region = "string",
                            },
                            Volumes = new Databricks.Inputs.JobTaskForEachTaskTaskNewClusterClusterLogConfVolumesArgs
                            {
                                Destination = "string",
                            },
                        },
                        ClusterMountInfos = new[]
                        {
                            new Databricks.Inputs.JobTaskForEachTaskTaskNewClusterClusterMountInfoArgs
                            {
                                LocalMountDirPath = "string",
                                NetworkFilesystemInfo = new Databricks.Inputs.JobTaskForEachTaskTaskNewClusterClusterMountInfoNetworkFilesystemInfoArgs
                                {
                                    ServerAddress = "string",
                                    MountOptions = "string",
                                },
                                RemoteMountDirPath = "string",
                            },
                        },
                        InitScripts = new[]
                        {
                            new Databricks.Inputs.JobTaskForEachTaskTaskNewClusterInitScriptArgs
                            {
                                Abfss = new Databricks.Inputs.JobTaskForEachTaskTaskNewClusterInitScriptAbfssArgs
                                {
                                    Destination = "string",
                                },
                                File = new Databricks.Inputs.JobTaskForEachTaskTaskNewClusterInitScriptFileArgs
                                {
                                    Destination = "string",
                                },
                                Gcs = new Databricks.Inputs.JobTaskForEachTaskTaskNewClusterInitScriptGcsArgs
                                {
                                    Destination = "string",
                                },
                                S3 = new Databricks.Inputs.JobTaskForEachTaskTaskNewClusterInitScriptS3Args
                                {
                                    Destination = "string",
                                    CannedAcl = "string",
                                    EnableEncryption = false,
                                    EncryptionType = "string",
                                    Endpoint = "string",
                                    KmsKey = "string",
                                    Region = "string",
                                },
                                Volumes = new Databricks.Inputs.JobTaskForEachTaskTaskNewClusterInitScriptVolumesArgs
                                {
                                    Destination = "string",
                                },
                                Workspace = new Databricks.Inputs.JobTaskForEachTaskTaskNewClusterInitScriptWorkspaceArgs
                                {
                                    Destination = "string",
                                },
                            },
                        },
                        CustomTags = 
                        {
                            { "string", "string" },
                        },
                        DataSecurityMode = "string",
                        DockerImage = new Databricks.Inputs.JobTaskForEachTaskTaskNewClusterDockerImageArgs
                        {
                            Url = "string",
                            BasicAuth = new Databricks.Inputs.JobTaskForEachTaskTaskNewClusterDockerImageBasicAuthArgs
                            {
                                Password = "string",
                                Username = "string",
                            },
                        },
                        DriverInstancePoolId = "string",
                        DriverNodeTypeId = "string",
                        EnableElasticDisk = false,
                        EnableLocalDiskEncryption = false,
                        WorkloadType = new Databricks.Inputs.JobTaskForEachTaskTaskNewClusterWorkloadTypeArgs
                        {
                            Clients = new Databricks.Inputs.JobTaskForEachTaskTaskNewClusterWorkloadTypeClientsArgs
                            {
                                Jobs = false,
                                Notebooks = false,
                            },
                        },
                        AwsAttributes = new Databricks.Inputs.JobTaskForEachTaskTaskNewClusterAwsAttributesArgs
                        {
                            Availability = "string",
                            EbsVolumeCount = 0,
                            EbsVolumeIops = 0,
                            EbsVolumeSize = 0,
                            EbsVolumeThroughput = 0,
                            EbsVolumeType = "string",
                            FirstOnDemand = 0,
                            InstanceProfileArn = "string",
                            SpotBidPricePercent = 0,
                            ZoneId = "string",
                        },
                        ClusterName = "string",
                        InstancePoolId = "string",
                        IsSingleNode = false,
                        Kind = "string",
                        Libraries = new[]
                        {
                            new Databricks.Inputs.JobTaskForEachTaskTaskNewClusterLibraryArgs
                            {
                                Cran = new Databricks.Inputs.JobTaskForEachTaskTaskNewClusterLibraryCranArgs
                                {
                                    Package = "string",
                                    Repo = "string",
                                },
                                Egg = "string",
                                Jar = "string",
                                Maven = new Databricks.Inputs.JobTaskForEachTaskTaskNewClusterLibraryMavenArgs
                                {
                                    Coordinates = "string",
                                    Exclusions = new[]
                                    {
                                        "string",
                                    },
                                    Repo = "string",
                                },
                                Pypi = new Databricks.Inputs.JobTaskForEachTaskTaskNewClusterLibraryPypiArgs
                                {
                                    Package = "string",
                                    Repo = "string",
                                },
                                Requirements = "string",
                                Whl = "string",
                            },
                        },
                        NodeTypeId = "string",
                        NumWorkers = 0,
                        PolicyId = "string",
                        RuntimeEngine = "string",
                        SingleUserName = "string",
                        SparkConf = 
                        {
                            { "string", "string" },
                        },
                        SparkEnvVars = 
                        {
                            { "string", "string" },
                        },
                        Autoscale = new Databricks.Inputs.JobTaskForEachTaskTaskNewClusterAutoscaleArgs
                        {
                            MaxWorkers = 0,
                            MinWorkers = 0,
                        },
                        ApplyPolicyDefaultValues = false,
                        UseMlRuntime = false,
                        GcpAttributes = new Databricks.Inputs.JobTaskForEachTaskTaskNewClusterGcpAttributesArgs
                        {
                            Availability = "string",
                            BootDiskSize = 0,
                            GoogleServiceAccount = "string",
                            LocalSsdCount = 0,
                            UsePreemptibleExecutors = false,
                            ZoneId = "string",
                        },
                    },
                    Description = "string",
                    NotebookTask = new Databricks.Inputs.JobTaskForEachTaskTaskNotebookTaskArgs
                    {
                        NotebookPath = "string",
                        BaseParameters = 
                        {
                            { "string", "string" },
                        },
                        Source = "string",
                        WarehouseId = "string",
                    },
                    EmailNotifications = new Databricks.Inputs.JobTaskForEachTaskTaskEmailNotificationsArgs
                    {
                        NoAlertForSkippedRuns = false,
                        OnDurationWarningThresholdExceededs = new[]
                        {
                            "string",
                        },
                        OnFailures = new[]
                        {
                            "string",
                        },
                        OnStarts = new[]
                        {
                            "string",
                        },
                        OnStreamingBacklogExceededs = new[]
                        {
                            "string",
                        },
                        OnSuccesses = new[]
                        {
                            "string",
                        },
                    },
                    EnvironmentKey = "string",
                    ExistingClusterId = "string",
                    GenAiComputeTask = new Databricks.Inputs.JobTaskForEachTaskTaskGenAiComputeTaskArgs
                    {
                        DlRuntimeImage = "string",
                        Command = "string",
                        Compute = new Databricks.Inputs.JobTaskForEachTaskTaskGenAiComputeTaskComputeArgs
                        {
                            GpuNodePoolId = "string",
                            NumGpus = 0,
                            GpuType = "string",
                        },
                        MlflowExperimentName = "string",
                        Source = "string",
                        TrainingScriptPath = "string",
                        YamlParameters = "string",
                        YamlParametersFilePath = "string",
                    },
                    Health = new Databricks.Inputs.JobTaskForEachTaskTaskHealthArgs
                    {
                        Rules = new[]
                        {
                            new Databricks.Inputs.JobTaskForEachTaskTaskHealthRuleArgs
                            {
                                Metric = "string",
                                Op = "string",
                                Value = 0,
                            },
                        },
                    },
                    JobClusterKey = "string",
                    NotificationSettings = new Databricks.Inputs.JobTaskForEachTaskTaskNotificationSettingsArgs
                    {
                        AlertOnLastAttempt = false,
                        NoAlertForCanceledRuns = false,
                        NoAlertForSkippedRuns = false,
                    },
                    MaxRetries = 0,
                    CleanRoomsNotebookTask = new Databricks.Inputs.JobTaskForEachTaskTaskCleanRoomsNotebookTaskArgs
                    {
                        CleanRoomName = "string",
                        NotebookName = "string",
                        Etag = "string",
                        NotebookBaseParameters = 
                        {
                            { "string", "string" },
                        },
                    },
                    DependsOns = new[]
                    {
                        new Databricks.Inputs.JobTaskForEachTaskTaskDependsOnArgs
                        {
                            TaskKey = "string",
                            Outcome = "string",
                        },
                    },
                    DbtTask = new Databricks.Inputs.JobTaskForEachTaskTaskDbtTaskArgs
                    {
                        Commands = new[]
                        {
                            "string",
                        },
                        Catalog = "string",
                        ProfilesDirectory = "string",
                        ProjectDirectory = "string",
                        Schema = "string",
                        Source = "string",
                        WarehouseId = "string",
                    },
                    Libraries = new[]
                    {
                        new Databricks.Inputs.JobTaskForEachTaskTaskLibraryArgs
                        {
                            Cran = new Databricks.Inputs.JobTaskForEachTaskTaskLibraryCranArgs
                            {
                                Package = "string",
                                Repo = "string",
                            },
                            Egg = "string",
                            Jar = "string",
                            Maven = new Databricks.Inputs.JobTaskForEachTaskTaskLibraryMavenArgs
                            {
                                Coordinates = "string",
                                Exclusions = new[]
                                {
                                    "string",
                                },
                                Repo = "string",
                            },
                            Pypi = new Databricks.Inputs.JobTaskForEachTaskTaskLibraryPypiArgs
                            {
                                Package = "string",
                                Repo = "string",
                            },
                            Requirements = "string",
                            Whl = "string",
                        },
                    },
                    PipelineTask = new Databricks.Inputs.JobTaskForEachTaskTaskPipelineTaskArgs
                    {
                        PipelineId = "string",
                        FullRefresh = false,
                    },
                    PythonWheelTask = new Databricks.Inputs.JobTaskForEachTaskTaskPythonWheelTaskArgs
                    {
                        EntryPoint = "string",
                        NamedParameters = 
                        {
                            { "string", "string" },
                        },
                        PackageName = "string",
                        Parameters = new[]
                        {
                            "string",
                        },
                    },
                    RetryOnTimeout = false,
                    RunIf = "string",
                    RunJobTask = new Databricks.Inputs.JobTaskForEachTaskTaskRunJobTaskArgs
                    {
                        JobId = 0,
                        DbtCommands = new[]
                        {
                            "string",
                        },
                        JarParams = new[]
                        {
                            "string",
                        },
                        JobParameters = 
                        {
                            { "string", "string" },
                        },
                        NotebookParams = 
                        {
                            { "string", "string" },
                        },
                        PipelineParams = new Databricks.Inputs.JobTaskForEachTaskTaskRunJobTaskPipelineParamsArgs
                        {
                            FullRefresh = false,
                        },
                        PythonNamedParams = 
                        {
                            { "string", "string" },
                        },
                        PythonParams = new[]
                        {
                            "string",
                        },
                        SparkSubmitParams = new[]
                        {
                            "string",
                        },
                        SqlParams = 
                        {
                            { "string", "string" },
                        },
                    },
                    SparkJarTask = new Databricks.Inputs.JobTaskForEachTaskTaskSparkJarTaskArgs
                    {
                        JarUri = "string",
                        MainClassName = "string",
                        Parameters = new[]
                        {
                            "string",
                        },
                        RunAsRepl = false,
                    },
                    SparkPythonTask = new Databricks.Inputs.JobTaskForEachTaskTaskSparkPythonTaskArgs
                    {
                        PythonFile = "string",
                        Parameters = new[]
                        {
                            "string",
                        },
                        Source = "string",
                    },
                    SparkSubmitTask = new Databricks.Inputs.JobTaskForEachTaskTaskSparkSubmitTaskArgs
                    {
                        Parameters = new[]
                        {
                            "string",
                        },
                    },
                    SqlTask = new Databricks.Inputs.JobTaskForEachTaskTaskSqlTaskArgs
                    {
                        WarehouseId = "string",
                        Alert = new Databricks.Inputs.JobTaskForEachTaskTaskSqlTaskAlertArgs
                        {
                            AlertId = "string",
                            PauseSubscriptions = false,
                            Subscriptions = new[]
                            {
                                new Databricks.Inputs.JobTaskForEachTaskTaskSqlTaskAlertSubscriptionArgs
                                {
                                    DestinationId = "string",
                                    UserName = "string",
                                },
                            },
                        },
                        Dashboard = new Databricks.Inputs.JobTaskForEachTaskTaskSqlTaskDashboardArgs
                        {
                            DashboardId = "string",
                            CustomSubject = "string",
                            PauseSubscriptions = false,
                            Subscriptions = new[]
                            {
                                new Databricks.Inputs.JobTaskForEachTaskTaskSqlTaskDashboardSubscriptionArgs
                                {
                                    DestinationId = "string",
                                    UserName = "string",
                                },
                            },
                        },
                        File = new Databricks.Inputs.JobTaskForEachTaskTaskSqlTaskFileArgs
                        {
                            Path = "string",
                            Source = "string",
                        },
                        Parameters = 
                        {
                            { "string", "string" },
                        },
                        Query = new Databricks.Inputs.JobTaskForEachTaskTaskSqlTaskQueryArgs
                        {
                            QueryId = "string",
                        },
                    },
                    ConditionTask = new Databricks.Inputs.JobTaskForEachTaskTaskConditionTaskArgs
                    {
                        Left = "string",
                        Op = "string",
                        Right = "string",
                    },
                    TimeoutSeconds = 0,
                    WebhookNotifications = new Databricks.Inputs.JobTaskForEachTaskTaskWebhookNotificationsArgs
                    {
                        OnDurationWarningThresholdExceededs = new[]
                        {
                            new Databricks.Inputs.JobTaskForEachTaskTaskWebhookNotificationsOnDurationWarningThresholdExceededArgs
                            {
                                Id = "string",
                            },
                        },
                        OnFailures = new[]
                        {
                            new Databricks.Inputs.JobTaskForEachTaskTaskWebhookNotificationsOnFailureArgs
                            {
                                Id = "string",
                            },
                        },
                        OnStarts = new[]
                        {
                            new Databricks.Inputs.JobTaskForEachTaskTaskWebhookNotificationsOnStartArgs
                            {
                                Id = "string",
                            },
                        },
                        OnStreamingBacklogExceededs = new[]
                        {
                            new Databricks.Inputs.JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs
                            {
                                Id = "string",
                            },
                        },
                        OnSuccesses = new[]
                        {
                            new Databricks.Inputs.JobTaskForEachTaskTaskWebhookNotificationsOnSuccessArgs
                            {
                                Id = "string",
                            },
                        },
                    },
                },
                Concurrency = 0,
            },
            GenAiComputeTask = new Databricks.Inputs.JobTaskGenAiComputeTaskArgs
            {
                DlRuntimeImage = "string",
                Command = "string",
                Compute = new Databricks.Inputs.JobTaskGenAiComputeTaskComputeArgs
                {
                    GpuNodePoolId = "string",
                    NumGpus = 0,
                    GpuType = "string",
                },
                MlflowExperimentName = "string",
                Source = "string",
                TrainingScriptPath = "string",
                YamlParameters = "string",
                YamlParametersFilePath = "string",
            },
            Health = new Databricks.Inputs.JobTaskHealthArgs
            {
                Rules = new[]
                {
                    new Databricks.Inputs.JobTaskHealthRuleArgs
                    {
                        Metric = "string",
                        Op = "string",
                        Value = 0,
                    },
                },
            },
            JobClusterKey = "string",
            Libraries = new[]
            {
                new Databricks.Inputs.JobTaskLibraryArgs
                {
                    Cran = new Databricks.Inputs.JobTaskLibraryCranArgs
                    {
                        Package = "string",
                        Repo = "string",
                    },
                    Egg = "string",
                    Jar = "string",
                    Maven = new Databricks.Inputs.JobTaskLibraryMavenArgs
                    {
                        Coordinates = "string",
                        Exclusions = new[]
                        {
                            "string",
                        },
                        Repo = "string",
                    },
                    Pypi = new Databricks.Inputs.JobTaskLibraryPypiArgs
                    {
                        Package = "string",
                        Repo = "string",
                    },
                    Requirements = "string",
                    Whl = "string",
                },
            },
            CleanRoomsNotebookTask = new Databricks.Inputs.JobTaskCleanRoomsNotebookTaskArgs
            {
                CleanRoomName = "string",
                NotebookName = "string",
                Etag = "string",
                NotebookBaseParameters = 
                {
                    { "string", "string" },
                },
            },
            WebhookNotifications = new Databricks.Inputs.JobTaskWebhookNotificationsArgs
            {
                OnDurationWarningThresholdExceededs = new[]
                {
                    new Databricks.Inputs.JobTaskWebhookNotificationsOnDurationWarningThresholdExceededArgs
                    {
                        Id = "string",
                    },
                },
                OnFailures = new[]
                {
                    new Databricks.Inputs.JobTaskWebhookNotificationsOnFailureArgs
                    {
                        Id = "string",
                    },
                },
                OnStarts = new[]
                {
                    new Databricks.Inputs.JobTaskWebhookNotificationsOnStartArgs
                    {
                        Id = "string",
                    },
                },
                OnStreamingBacklogExceededs = new[]
                {
                    new Databricks.Inputs.JobTaskWebhookNotificationsOnStreamingBacklogExceededArgs
                    {
                        Id = "string",
                    },
                },
                OnSuccesses = new[]
                {
                    new Databricks.Inputs.JobTaskWebhookNotificationsOnSuccessArgs
                    {
                        Id = "string",
                    },
                },
            },
            DbtTask = new Databricks.Inputs.JobTaskDbtTaskArgs
            {
                Commands = new[]
                {
                    "string",
                },
                Catalog = "string",
                ProfilesDirectory = "string",
                ProjectDirectory = "string",
                Schema = "string",
                Source = "string",
                WarehouseId = "string",
            },
            NotebookTask = new Databricks.Inputs.JobTaskNotebookTaskArgs
            {
                NotebookPath = "string",
                BaseParameters = 
                {
                    { "string", "string" },
                },
                Source = "string",
                WarehouseId = "string",
            },
            NotificationSettings = new Databricks.Inputs.JobTaskNotificationSettingsArgs
            {
                AlertOnLastAttempt = false,
                NoAlertForCanceledRuns = false,
                NoAlertForSkippedRuns = false,
            },
            PipelineTask = new Databricks.Inputs.JobTaskPipelineTaskArgs
            {
                PipelineId = "string",
                FullRefresh = false,
            },
            PythonWheelTask = new Databricks.Inputs.JobTaskPythonWheelTaskArgs
            {
                EntryPoint = "string",
                NamedParameters = 
                {
                    { "string", "string" },
                },
                PackageName = "string",
                Parameters = new[]
                {
                    "string",
                },
            },
            RetryOnTimeout = false,
            RunIf = "string",
            RunJobTask = new Databricks.Inputs.JobTaskRunJobTaskArgs
            {
                JobId = 0,
                DbtCommands = new[]
                {
                    "string",
                },
                JarParams = new[]
                {
                    "string",
                },
                JobParameters = 
                {
                    { "string", "string" },
                },
                NotebookParams = 
                {
                    { "string", "string" },
                },
                PipelineParams = new Databricks.Inputs.JobTaskRunJobTaskPipelineParamsArgs
                {
                    FullRefresh = false,
                },
                PythonNamedParams = 
                {
                    { "string", "string" },
                },
                PythonParams = new[]
                {
                    "string",
                },
                SparkSubmitParams = new[]
                {
                    "string",
                },
                SqlParams = 
                {
                    { "string", "string" },
                },
            },
            SparkJarTask = new Databricks.Inputs.JobTaskSparkJarTaskArgs
            {
                JarUri = "string",
                MainClassName = "string",
                Parameters = new[]
                {
                    "string",
                },
                RunAsRepl = false,
            },
            SparkPythonTask = new Databricks.Inputs.JobTaskSparkPythonTaskArgs
            {
                PythonFile = "string",
                Parameters = new[]
                {
                    "string",
                },
                Source = "string",
            },
            SparkSubmitTask = new Databricks.Inputs.JobTaskSparkSubmitTaskArgs
            {
                Parameters = new[]
                {
                    "string",
                },
            },
            SqlTask = new Databricks.Inputs.JobTaskSqlTaskArgs
            {
                WarehouseId = "string",
                Alert = new Databricks.Inputs.JobTaskSqlTaskAlertArgs
                {
                    AlertId = "string",
                    PauseSubscriptions = false,
                    Subscriptions = new[]
                    {
                        new Databricks.Inputs.JobTaskSqlTaskAlertSubscriptionArgs
                        {
                            DestinationId = "string",
                            UserName = "string",
                        },
                    },
                },
                Dashboard = new Databricks.Inputs.JobTaskSqlTaskDashboardArgs
                {
                    DashboardId = "string",
                    CustomSubject = "string",
                    PauseSubscriptions = false,
                    Subscriptions = new[]
                    {
                        new Databricks.Inputs.JobTaskSqlTaskDashboardSubscriptionArgs
                        {
                            DestinationId = "string",
                            UserName = "string",
                        },
                    },
                },
                File = new Databricks.Inputs.JobTaskSqlTaskFileArgs
                {
                    Path = "string",
                    Source = "string",
                },
                Parameters = 
                {
                    { "string", "string" },
                },
                Query = new Databricks.Inputs.JobTaskSqlTaskQueryArgs
                {
                    QueryId = "string",
                },
            },
            ConditionTask = new Databricks.Inputs.JobTaskConditionTaskArgs
            {
                Left = "string",
                Op = "string",
                Right = "string",
            },
            TimeoutSeconds = 0,
            MinRetryIntervalMillis = 0,
        },
    },
    TimeoutSeconds = 0,
    Trigger = new Databricks.Inputs.JobTriggerArgs
    {
        FileArrival = new Databricks.Inputs.JobTriggerFileArrivalArgs
        {
            Url = "string",
            MinTimeBetweenTriggersSeconds = 0,
            WaitAfterLastChangeSeconds = 0,
        },
        PauseStatus = "string",
        Periodic = new Databricks.Inputs.JobTriggerPeriodicArgs
        {
            Interval = 0,
            Unit = "string",
        },
        Table = new Databricks.Inputs.JobTriggerTableArgs
        {
            Condition = "string",
            MinTimeBetweenTriggersSeconds = 0,
            TableNames = new[]
            {
                "string",
            },
            WaitAfterLastChangeSeconds = 0,
        },
        TableUpdate = new Databricks.Inputs.JobTriggerTableUpdateArgs
        {
            TableNames = new[]
            {
                "string",
            },
            Condition = "string",
            MinTimeBetweenTriggersSeconds = 0,
            WaitAfterLastChangeSeconds = 0,
        },
    },
    WebhookNotifications = new Databricks.Inputs.JobWebhookNotificationsArgs
    {
        OnDurationWarningThresholdExceededs = new[]
        {
            new Databricks.Inputs.JobWebhookNotificationsOnDurationWarningThresholdExceededArgs
            {
                Id = "string",
            },
        },
        OnFailures = new[]
        {
            new Databricks.Inputs.JobWebhookNotificationsOnFailureArgs
            {
                Id = "string",
            },
        },
        OnStarts = new[]
        {
            new Databricks.Inputs.JobWebhookNotificationsOnStartArgs
            {
                Id = "string",
            },
        },
        OnStreamingBacklogExceededs = new[]
        {
            new Databricks.Inputs.JobWebhookNotificationsOnStreamingBacklogExceededArgs
            {
                Id = "string",
            },
        },
        OnSuccesses = new[]
        {
            new Databricks.Inputs.JobWebhookNotificationsOnSuccessArgs
            {
                Id = "string",
            },
        },
    },
});
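
The full constructor example above enumerates every available input with placeholder values. In practice only a handful of fields are needed; the following minimal sketch in C# creates a single-task notebook job on a new cluster. The Spark version, node type, and notebook path shown here are illustrative assumptions, not provider defaults.

using Pulumi;
using Databricks = Pulumi.Databricks;

return await Deployment.RunAsync(() =>
{
    // Minimal single-task job: one notebook task running on a freshly
    // provisioned cluster. All literal values below are assumptions chosen
    // for illustration only.
    var minimal = new Databricks.Job("minimal", new()
    {
        Name = "Minimal notebook job",
        Tasks = new[]
        {
            new Databricks.Inputs.JobTaskArgs
            {
                TaskKey = "a",
                NewCluster = new Databricks.Inputs.JobTaskNewClusterArgs
                {
                    NumWorkers = 1,
                    SparkVersion = "14.3.x-scala2.12", // assumed LTS runtime version
                    NodeTypeId = "i3.xlarge",          // assumed AWS node type
                },
                NotebookTask = new Databricks.Inputs.JobTaskNotebookTaskArgs
                {
                    NotebookPath = "/Shared/example",  // assumed workspace notebook path
                },
            },
        },
    });
});

Unset inputs such as schedules, notifications, and retries simply take their server-side defaults, so starting small and adding blocks as needed keeps diffs easy to review.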
example, err := databricks.NewJob(ctx, "jobResource", &databricks.JobArgs{
	BudgetPolicyId: pulumi.String("string"),
	Continuous: &databricks.JobContinuousArgs{
		PauseStatus: pulumi.String("string"),
	},
	ControlRunState: pulumi.Bool(false),
	Deployment: &databricks.JobDeploymentArgs{
		Kind:             pulumi.String("string"),
		MetadataFilePath: pulumi.String("string"),
	},
	Description: pulumi.String("string"),
	EditMode:    pulumi.String("string"),
	EmailNotifications: &databricks.JobEmailNotificationsArgs{
		NoAlertForSkippedRuns: pulumi.Bool(false),
		OnDurationWarningThresholdExceededs: pulumi.StringArray{
			pulumi.String("string"),
		},
		OnFailures: pulumi.StringArray{
			pulumi.String("string"),
		},
		OnStarts: pulumi.StringArray{
			pulumi.String("string"),
		},
		OnStreamingBacklogExceededs: pulumi.StringArray{
			pulumi.String("string"),
		},
		OnSuccesses: pulumi.StringArray{
			pulumi.String("string"),
		},
	},
	Environments: databricks.JobEnvironmentArray{
		&databricks.JobEnvironmentArgs{
			EnvironmentKey: pulumi.String("string"),
			Spec: &databricks.JobEnvironmentSpecArgs{
				Client: pulumi.String("string"),
				Dependencies: pulumi.StringArray{
					pulumi.String("string"),
				},
			},
		},
	},
	ExistingClusterId: pulumi.String("string"),
	Format:            pulumi.String("string"),
	GitSource: &databricks.JobGitSourceArgs{
		Url:    pulumi.String("string"),
		Branch: pulumi.String("string"),
		Commit: pulumi.String("string"),
		GitSnapshot: &databricks.JobGitSourceGitSnapshotArgs{
			UsedCommit: pulumi.String("string"),
		},
		JobSource: &databricks.JobGitSourceJobSourceArgs{
			ImportFromGitBranch: pulumi.String("string"),
			JobConfigPath:       pulumi.String("string"),
			DirtyState:          pulumi.String("string"),
		},
		Provider: pulumi.String("string"),
		Tag:      pulumi.String("string"),
	},
	Health: &databricks.JobHealthArgs{
		Rules: databricks.JobHealthRuleArray{
			&databricks.JobHealthRuleArgs{
				Metric: pulumi.String("string"),
				Op:     pulumi.String("string"),
				Value:  pulumi.Int(0),
			},
		},
	},
	JobClusters: databricks.JobJobClusterArray{
		&databricks.JobJobClusterArgs{
			JobClusterKey: pulumi.String("string"),
			NewCluster: &databricks.JobJobClusterNewClusterArgs{
				SparkVersion:     pulumi.String("string"),
				IdempotencyToken: pulumi.String("string"),
				SshPublicKeys: pulumi.StringArray{
					pulumi.String("string"),
				},
				AzureAttributes: &databricks.JobJobClusterNewClusterAzureAttributesArgs{
					Availability:  pulumi.String("string"),
					FirstOnDemand: pulumi.Int(0),
					LogAnalyticsInfo: &databricks.JobJobClusterNewClusterAzureAttributesLogAnalyticsInfoArgs{
						LogAnalyticsPrimaryKey:  pulumi.String("string"),
						LogAnalyticsWorkspaceId: pulumi.String("string"),
					},
					SpotBidMaxPrice: pulumi.Float64(0),
				},
				ClusterId: pulumi.String("string"),
				ClusterLogConf: &databricks.JobJobClusterNewClusterClusterLogConfArgs{
					Dbfs: &databricks.JobJobClusterNewClusterClusterLogConfDbfsArgs{
						Destination: pulumi.String("string"),
					},
					S3: &databricks.JobJobClusterNewClusterClusterLogConfS3Args{
						Destination:      pulumi.String("string"),
						CannedAcl:        pulumi.String("string"),
						EnableEncryption: pulumi.Bool(false),
						EncryptionType:   pulumi.String("string"),
						Endpoint:         pulumi.String("string"),
						KmsKey:           pulumi.String("string"),
						Region:           pulumi.String("string"),
					},
					Volumes: &databricks.JobJobClusterNewClusterClusterLogConfVolumesArgs{
						Destination: pulumi.String("string"),
					},
				},
				ClusterMountInfos: databricks.JobJobClusterNewClusterClusterMountInfoArray{
					&databricks.JobJobClusterNewClusterClusterMountInfoArgs{
						LocalMountDirPath: pulumi.String("string"),
						NetworkFilesystemInfo: &databricks.JobJobClusterNewClusterClusterMountInfoNetworkFilesystemInfoArgs{
							ServerAddress: pulumi.String("string"),
							MountOptions:  pulumi.String("string"),
						},
						RemoteMountDirPath: pulumi.String("string"),
					},
				},
				InitScripts: databricks.JobJobClusterNewClusterInitScriptArray{
					&databricks.JobJobClusterNewClusterInitScriptArgs{
						Abfss: &databricks.JobJobClusterNewClusterInitScriptAbfssArgs{
							Destination: pulumi.String("string"),
						},
						File: &databricks.JobJobClusterNewClusterInitScriptFileArgs{
							Destination: pulumi.String("string"),
						},
						Gcs: &databricks.JobJobClusterNewClusterInitScriptGcsArgs{
							Destination: pulumi.String("string"),
						},
						S3: &databricks.JobJobClusterNewClusterInitScriptS3Args{
							Destination:      pulumi.String("string"),
							CannedAcl:        pulumi.String("string"),
							EnableEncryption: pulumi.Bool(false),
							EncryptionType:   pulumi.String("string"),
							Endpoint:         pulumi.String("string"),
							KmsKey:           pulumi.String("string"),
							Region:           pulumi.String("string"),
						},
						Volumes: &databricks.JobJobClusterNewClusterInitScriptVolumesArgs{
							Destination: pulumi.String("string"),
						},
						Workspace: &databricks.JobJobClusterNewClusterInitScriptWorkspaceArgs{
							Destination: pulumi.String("string"),
						},
					},
				},
				CustomTags: pulumi.StringMap{
					"string": pulumi.String("string"),
				},
				DataSecurityMode: pulumi.String("string"),
				DockerImage: &databricks.JobJobClusterNewClusterDockerImageArgs{
					Url: pulumi.String("string"),
					BasicAuth: &databricks.JobJobClusterNewClusterDockerImageBasicAuthArgs{
						Password: pulumi.String("string"),
						Username: pulumi.String("string"),
					},
				},
				DriverInstancePoolId:      pulumi.String("string"),
				DriverNodeTypeId:          pulumi.String("string"),
				EnableElasticDisk:         pulumi.Bool(false),
				EnableLocalDiskEncryption: pulumi.Bool(false),
				WorkloadType: &databricks.JobJobClusterNewClusterWorkloadTypeArgs{
					Clients: &databricks.JobJobClusterNewClusterWorkloadTypeClientsArgs{
						Jobs:      pulumi.Bool(false),
						Notebooks: pulumi.Bool(false),
					},
				},
				AwsAttributes: &databricks.JobJobClusterNewClusterAwsAttributesArgs{
					Availability:        pulumi.String("string"),
					EbsVolumeCount:      pulumi.Int(0),
					EbsVolumeIops:       pulumi.Int(0),
					EbsVolumeSize:       pulumi.Int(0),
					EbsVolumeThroughput: pulumi.Int(0),
					EbsVolumeType:       pulumi.String("string"),
					FirstOnDemand:       pulumi.Int(0),
					InstanceProfileArn:  pulumi.String("string"),
					SpotBidPricePercent: pulumi.Int(0),
					ZoneId:              pulumi.String("string"),
				},
				ClusterName:    pulumi.String("string"),
				InstancePoolId: pulumi.String("string"),
				IsSingleNode:   pulumi.Bool(false),
				Kind:           pulumi.String("string"),
				Libraries: databricks.JobJobClusterNewClusterLibraryArray{
					&databricks.JobJobClusterNewClusterLibraryArgs{
						Cran: &databricks.JobJobClusterNewClusterLibraryCranArgs{
							Package: pulumi.String("string"),
							Repo:    pulumi.String("string"),
						},
						Egg: pulumi.String("string"),
						Jar: pulumi.String("string"),
						Maven: &databricks.JobJobClusterNewClusterLibraryMavenArgs{
							Coordinates: pulumi.String("string"),
							Exclusions: pulumi.StringArray{
								pulumi.String("string"),
							},
							Repo: pulumi.String("string"),
						},
						Pypi: &databricks.JobJobClusterNewClusterLibraryPypiArgs{
							Package: pulumi.String("string"),
							Repo:    pulumi.String("string"),
						},
						Requirements: pulumi.String("string"),
						Whl:          pulumi.String("string"),
					},
				},
				NodeTypeId:     pulumi.String("string"),
				NumWorkers:     pulumi.Int(0),
				PolicyId:       pulumi.String("string"),
				RuntimeEngine:  pulumi.String("string"),
				SingleUserName: pulumi.String("string"),
				SparkConf: pulumi.StringMap{
					"string": pulumi.String("string"),
				},
				SparkEnvVars: pulumi.StringMap{
					"string": pulumi.String("string"),
				},
				Autoscale: &databricks.JobJobClusterNewClusterAutoscaleArgs{
					MaxWorkers: pulumi.Int(0),
					MinWorkers: pulumi.Int(0),
				},
				ApplyPolicyDefaultValues: pulumi.Bool(false),
				UseMlRuntime:             pulumi.Bool(false),
				GcpAttributes: &databricks.JobJobClusterNewClusterGcpAttributesArgs{
					Availability:            pulumi.String("string"),
					BootDiskSize:            pulumi.Int(0),
					GoogleServiceAccount:    pulumi.String("string"),
					LocalSsdCount:           pulumi.Int(0),
					UsePreemptibleExecutors: pulumi.Bool(false),
					ZoneId:                  pulumi.String("string"),
				},
			},
		},
	},
	Libraries: databricks.JobLibraryArray{
		&databricks.JobLibraryArgs{
			Cran: &databricks.JobLibraryCranArgs{
				Package: pulumi.String("string"),
				Repo:    pulumi.String("string"),
			},
			Egg: pulumi.String("string"),
			Jar: pulumi.String("string"),
			Maven: &databricks.JobLibraryMavenArgs{
				Coordinates: pulumi.String("string"),
				Exclusions: pulumi.StringArray{
					pulumi.String("string"),
				},
				Repo: pulumi.String("string"),
			},
			Pypi: &databricks.JobLibraryPypiArgs{
				Package: pulumi.String("string"),
				Repo:    pulumi.String("string"),
			},
			Requirements: pulumi.String("string"),
			Whl:          pulumi.String("string"),
		},
	},
	MaxConcurrentRuns: pulumi.Int(0),
	Name:              pulumi.String("string"),
	NewCluster: &databricks.JobNewClusterArgs{
		SparkVersion:     pulumi.String("string"),
		IdempotencyToken: pulumi.String("string"),
		SshPublicKeys: pulumi.StringArray{
			pulumi.String("string"),
		},
		AzureAttributes: &databricks.JobNewClusterAzureAttributesArgs{
			Availability:  pulumi.String("string"),
			FirstOnDemand: pulumi.Int(0),
			LogAnalyticsInfo: &databricks.JobNewClusterAzureAttributesLogAnalyticsInfoArgs{
				LogAnalyticsPrimaryKey:  pulumi.String("string"),
				LogAnalyticsWorkspaceId: pulumi.String("string"),
			},
			SpotBidMaxPrice: pulumi.Float64(0),
		},
		ClusterId: pulumi.String("string"),
		ClusterLogConf: &databricks.JobNewClusterClusterLogConfArgs{
			Dbfs: &databricks.JobNewClusterClusterLogConfDbfsArgs{
				Destination: pulumi.String("string"),
			},
			S3: &databricks.JobNewClusterClusterLogConfS3Args{
				Destination:      pulumi.String("string"),
				CannedAcl:        pulumi.String("string"),
				EnableEncryption: pulumi.Bool(false),
				EncryptionType:   pulumi.String("string"),
				Endpoint:         pulumi.String("string"),
				KmsKey:           pulumi.String("string"),
				Region:           pulumi.String("string"),
			},
			Volumes: &databricks.JobNewClusterClusterLogConfVolumesArgs{
				Destination: pulumi.String("string"),
			},
		},
		ClusterMountInfos: databricks.JobNewClusterClusterMountInfoArray{
			&databricks.JobNewClusterClusterMountInfoArgs{
				LocalMountDirPath: pulumi.String("string"),
				NetworkFilesystemInfo: &databricks.JobNewClusterClusterMountInfoNetworkFilesystemInfoArgs{
					ServerAddress: pulumi.String("string"),
					MountOptions:  pulumi.String("string"),
				},
				RemoteMountDirPath: pulumi.String("string"),
			},
		},
		InitScripts: databricks.JobNewClusterInitScriptArray{
			&databricks.JobNewClusterInitScriptArgs{
				Abfss: &databricks.JobNewClusterInitScriptAbfssArgs{
					Destination: pulumi.String("string"),
				},
				File: &databricks.JobNewClusterInitScriptFileArgs{
					Destination: pulumi.String("string"),
				},
				Gcs: &databricks.JobNewClusterInitScriptGcsArgs{
					Destination: pulumi.String("string"),
				},
				S3: &databricks.JobNewClusterInitScriptS3Args{
					Destination:      pulumi.String("string"),
					CannedAcl:        pulumi.String("string"),
					EnableEncryption: pulumi.Bool(false),
					EncryptionType:   pulumi.String("string"),
					Endpoint:         pulumi.String("string"),
					KmsKey:           pulumi.String("string"),
					Region:           pulumi.String("string"),
				},
				Volumes: &databricks.JobNewClusterInitScriptVolumesArgs{
					Destination: pulumi.String("string"),
				},
				Workspace: &databricks.JobNewClusterInitScriptWorkspaceArgs{
					Destination: pulumi.String("string"),
				},
			},
		},
		CustomTags: pulumi.StringMap{
			"string": pulumi.String("string"),
		},
		DataSecurityMode: pulumi.String("string"),
		DockerImage: &databricks.JobNewClusterDockerImageArgs{
			Url: pulumi.String("string"),
			BasicAuth: &databricks.JobNewClusterDockerImageBasicAuthArgs{
				Password: pulumi.String("string"),
				Username: pulumi.String("string"),
			},
		},
		DriverInstancePoolId:      pulumi.String("string"),
		DriverNodeTypeId:          pulumi.String("string"),
		EnableElasticDisk:         pulumi.Bool(false),
		EnableLocalDiskEncryption: pulumi.Bool(false),
		WorkloadType: &databricks.JobNewClusterWorkloadTypeArgs{
			Clients: &databricks.JobNewClusterWorkloadTypeClientsArgs{
				Jobs:      pulumi.Bool(false),
				Notebooks: pulumi.Bool(false),
			},
		},
		AwsAttributes: &databricks.JobNewClusterAwsAttributesArgs{
			Availability:        pulumi.String("string"),
			EbsVolumeCount:      pulumi.Int(0),
			EbsVolumeIops:       pulumi.Int(0),
			EbsVolumeSize:       pulumi.Int(0),
			EbsVolumeThroughput: pulumi.Int(0),
			EbsVolumeType:       pulumi.String("string"),
			FirstOnDemand:       pulumi.Int(0),
			InstanceProfileArn:  pulumi.String("string"),
			SpotBidPricePercent: pulumi.Int(0),
			ZoneId:              pulumi.String("string"),
		},
		ClusterName:    pulumi.String("string"),
		InstancePoolId: pulumi.String("string"),
		IsSingleNode:   pulumi.Bool(false),
		Kind:           pulumi.String("string"),
		Libraries: databricks.JobNewClusterLibraryArray{
			&databricks.JobNewClusterLibraryArgs{
				Cran: &databricks.JobNewClusterLibraryCranArgs{
					Package: pulumi.String("string"),
					Repo:    pulumi.String("string"),
				},
				Egg: pulumi.String("string"),
				Jar: pulumi.String("string"),
				Maven: &databricks.JobNewClusterLibraryMavenArgs{
					Coordinates: pulumi.String("string"),
					Exclusions: pulumi.StringArray{
						pulumi.String("string"),
					},
					Repo: pulumi.String("string"),
				},
				Pypi: &databricks.JobNewClusterLibraryPypiArgs{
					Package: pulumi.String("string"),
					Repo:    pulumi.String("string"),
				},
				Requirements: pulumi.String("string"),
				Whl:          pulumi.String("string"),
			},
		},
		NodeTypeId:     pulumi.String("string"),
		NumWorkers:     pulumi.Int(0),
		PolicyId:       pulumi.String("string"),
		RuntimeEngine:  pulumi.String("string"),
		SingleUserName: pulumi.String("string"),
		SparkConf: pulumi.StringMap{
			"string": pulumi.String("string"),
		},
		SparkEnvVars: pulumi.StringMap{
			"string": pulumi.String("string"),
		},
		Autoscale: &databricks.JobNewClusterAutoscaleArgs{
			MaxWorkers: pulumi.Int(0),
			MinWorkers: pulumi.Int(0),
		},
		ApplyPolicyDefaultValues: pulumi.Bool(false),
		UseMlRuntime:             pulumi.Bool(false),
		GcpAttributes: &databricks.JobNewClusterGcpAttributesArgs{
			Availability:            pulumi.String("string"),
			BootDiskSize:            pulumi.Int(0),
			GoogleServiceAccount:    pulumi.String("string"),
			LocalSsdCount:           pulumi.Int(0),
			UsePreemptibleExecutors: pulumi.Bool(false),
			ZoneId:                  pulumi.String("string"),
		},
	},
	NotificationSettings: &databricks.JobNotificationSettingsArgs{
		NoAlertForCanceledRuns: pulumi.Bool(false),
		NoAlertForSkippedRuns:  pulumi.Bool(false),
	},
	Parameters: databricks.JobParameterArray{
		&databricks.JobParameterArgs{
			Default: pulumi.String("string"),
			Name:    pulumi.String("string"),
		},
	},
	PerformanceTarget: pulumi.String("string"),
	Queue: &databricks.JobQueueArgs{
		Enabled: pulumi.Bool(false),
	},
	RunAs: &databricks.JobRunAsArgs{
		ServicePrincipalName: pulumi.String("string"),
		UserName:             pulumi.String("string"),
	},
	Schedule: &databricks.JobScheduleArgs{
		QuartzCronExpression: pulumi.String("string"),
		TimezoneId:           pulumi.String("string"),
		PauseStatus:          pulumi.String("string"),
	},
	Tags: pulumi.StringMap{
		"string": pulumi.String("string"),
	},
	Tasks: databricks.JobTaskArray{
		&databricks.JobTaskArgs{
			TaskKey:    pulumi.String("string"),
			MaxRetries: pulumi.Int(0),
			DependsOns: databricks.JobTaskDependsOnArray{
				&databricks.JobTaskDependsOnArgs{
					TaskKey: pulumi.String("string"),
					Outcome: pulumi.String("string"),
				},
			},
			NewCluster: &databricks.JobTaskNewClusterArgs{
				SparkVersion:     pulumi.String("string"),
				IdempotencyToken: pulumi.String("string"),
				SshPublicKeys: pulumi.StringArray{
					pulumi.String("string"),
				},
				AzureAttributes: &databricks.JobTaskNewClusterAzureAttributesArgs{
					Availability:  pulumi.String("string"),
					FirstOnDemand: pulumi.Int(0),
					LogAnalyticsInfo: &databricks.JobTaskNewClusterAzureAttributesLogAnalyticsInfoArgs{
						LogAnalyticsPrimaryKey:  pulumi.String("string"),
						LogAnalyticsWorkspaceId: pulumi.String("string"),
					},
					SpotBidMaxPrice: pulumi.Float64(0),
				},
				ClusterId: pulumi.String("string"),
				ClusterLogConf: &databricks.JobTaskNewClusterClusterLogConfArgs{
					Dbfs: &databricks.JobTaskNewClusterClusterLogConfDbfsArgs{
						Destination: pulumi.String("string"),
					},
					S3: &databricks.JobTaskNewClusterClusterLogConfS3Args{
						Destination:      pulumi.String("string"),
						CannedAcl:        pulumi.String("string"),
						EnableEncryption: pulumi.Bool(false),
						EncryptionType:   pulumi.String("string"),
						Endpoint:         pulumi.String("string"),
						KmsKey:           pulumi.String("string"),
						Region:           pulumi.String("string"),
					},
					Volumes: &databricks.JobTaskNewClusterClusterLogConfVolumesArgs{
						Destination: pulumi.String("string"),
					},
				},
				ClusterMountInfos: databricks.JobTaskNewClusterClusterMountInfoArray{
					&databricks.JobTaskNewClusterClusterMountInfoArgs{
						LocalMountDirPath: pulumi.String("string"),
						NetworkFilesystemInfo: &databricks.JobTaskNewClusterClusterMountInfoNetworkFilesystemInfoArgs{
							ServerAddress: pulumi.String("string"),
							MountOptions:  pulumi.String("string"),
						},
						RemoteMountDirPath: pulumi.String("string"),
					},
				},
				InitScripts: databricks.JobTaskNewClusterInitScriptArray{
					&databricks.JobTaskNewClusterInitScriptArgs{
						Abfss: &databricks.JobTaskNewClusterInitScriptAbfssArgs{
							Destination: pulumi.String("string"),
						},
						File: &databricks.JobTaskNewClusterInitScriptFileArgs{
							Destination: pulumi.String("string"),
						},
						Gcs: &databricks.JobTaskNewClusterInitScriptGcsArgs{
							Destination: pulumi.String("string"),
						},
						S3: &databricks.JobTaskNewClusterInitScriptS3Args{
							Destination:      pulumi.String("string"),
							CannedAcl:        pulumi.String("string"),
							EnableEncryption: pulumi.Bool(false),
							EncryptionType:   pulumi.String("string"),
							Endpoint:         pulumi.String("string"),
							KmsKey:           pulumi.String("string"),
							Region:           pulumi.String("string"),
						},
						Volumes: &databricks.JobTaskNewClusterInitScriptVolumesArgs{
							Destination: pulumi.String("string"),
						},
						Workspace: &databricks.JobTaskNewClusterInitScriptWorkspaceArgs{
							Destination: pulumi.String("string"),
						},
					},
				},
				CustomTags: pulumi.StringMap{
					"string": pulumi.String("string"),
				},
				DataSecurityMode: pulumi.String("string"),
				DockerImage: &databricks.JobTaskNewClusterDockerImageArgs{
					Url: pulumi.String("string"),
					BasicAuth: &databricks.JobTaskNewClusterDockerImageBasicAuthArgs{
						Password: pulumi.String("string"),
						Username: pulumi.String("string"),
					},
				},
				DriverInstancePoolId:      pulumi.String("string"),
				DriverNodeTypeId:          pulumi.String("string"),
				EnableElasticDisk:         pulumi.Bool(false),
				EnableLocalDiskEncryption: pulumi.Bool(false),
				WorkloadType: &databricks.JobTaskNewClusterWorkloadTypeArgs{
					Clients: &databricks.JobTaskNewClusterWorkloadTypeClientsArgs{
						Jobs:      pulumi.Bool(false),
						Notebooks: pulumi.Bool(false),
					},
				},
				AwsAttributes: &databricks.JobTaskNewClusterAwsAttributesArgs{
					Availability:        pulumi.String("string"),
					EbsVolumeCount:      pulumi.Int(0),
					EbsVolumeIops:       pulumi.Int(0),
					EbsVolumeSize:       pulumi.Int(0),
					EbsVolumeThroughput: pulumi.Int(0),
					EbsVolumeType:       pulumi.String("string"),
					FirstOnDemand:       pulumi.Int(0),
					InstanceProfileArn:  pulumi.String("string"),
					SpotBidPricePercent: pulumi.Int(0),
					ZoneId:              pulumi.String("string"),
				},
				ClusterName:    pulumi.String("string"),
				InstancePoolId: pulumi.String("string"),
				IsSingleNode:   pulumi.Bool(false),
				Kind:           pulumi.String("string"),
				Libraries: databricks.JobTaskNewClusterLibraryArray{
					&databricks.JobTaskNewClusterLibraryArgs{
						Cran: &databricks.JobTaskNewClusterLibraryCranArgs{
							Package: pulumi.String("string"),
							Repo:    pulumi.String("string"),
						},
						Egg: pulumi.String("string"),
						Jar: pulumi.String("string"),
						Maven: &databricks.JobTaskNewClusterLibraryMavenArgs{
							Coordinates: pulumi.String("string"),
							Exclusions: pulumi.StringArray{
								pulumi.String("string"),
							},
							Repo: pulumi.String("string"),
						},
						Pypi: &databricks.JobTaskNewClusterLibraryPypiArgs{
							Package: pulumi.String("string"),
							Repo:    pulumi.String("string"),
						},
						Requirements: pulumi.String("string"),
						Whl:          pulumi.String("string"),
					},
				},
				NodeTypeId:     pulumi.String("string"),
				NumWorkers:     pulumi.Int(0),
				PolicyId:       pulumi.String("string"),
				RuntimeEngine:  pulumi.String("string"),
				SingleUserName: pulumi.String("string"),
				SparkConf: pulumi.StringMap{
					"string": pulumi.String("string"),
				},
				SparkEnvVars: pulumi.StringMap{
					"string": pulumi.String("string"),
				},
				Autoscale: &databricks.JobTaskNewClusterAutoscaleArgs{
					MaxWorkers: pulumi.Int(0),
					MinWorkers: pulumi.Int(0),
				},
				ApplyPolicyDefaultValues: pulumi.Bool(false),
				UseMlRuntime:             pulumi.Bool(false),
				GcpAttributes: &databricks.JobTaskNewClusterGcpAttributesArgs{
					Availability:            pulumi.String("string"),
					BootDiskSize:            pulumi.Int(0),
					GoogleServiceAccount:    pulumi.String("string"),
					LocalSsdCount:           pulumi.Int(0),
					UsePreemptibleExecutors: pulumi.Bool(false),
					ZoneId:                  pulumi.String("string"),
				},
			},
			Description:             pulumi.String("string"),
			DisableAutoOptimization: pulumi.Bool(false),
			EmailNotifications: &databricks.JobTaskEmailNotificationsArgs{
				NoAlertForSkippedRuns: pulumi.Bool(false),
				OnDurationWarningThresholdExceededs: pulumi.StringArray{
					pulumi.String("string"),
				},
				OnFailures: pulumi.StringArray{
					pulumi.String("string"),
				},
				OnStarts: pulumi.StringArray{
					pulumi.String("string"),
				},
				OnStreamingBacklogExceededs: pulumi.StringArray{
					pulumi.String("string"),
				},
				OnSuccesses: pulumi.StringArray{
					pulumi.String("string"),
				},
			},
			EnvironmentKey:    pulumi.String("string"),
			ExistingClusterId: pulumi.String("string"),
			ForEachTask: &databricks.JobTaskForEachTaskArgs{
				Inputs: pulumi.String("string"),
				Task: &databricks.JobTaskForEachTaskTaskArgs{
					TaskKey:                 pulumi.String("string"),
					MinRetryIntervalMillis:  pulumi.Int(0),
					DisableAutoOptimization: pulumi.Bool(false),
					NewCluster: &databricks.JobTaskForEachTaskTaskNewClusterArgs{
						SparkVersion:     pulumi.String("string"),
						IdempotencyToken: pulumi.String("string"),
						SshPublicKeys: pulumi.StringArray{
							pulumi.String("string"),
						},
						AzureAttributes: &databricks.JobTaskForEachTaskTaskNewClusterAzureAttributesArgs{
							Availability:  pulumi.String("string"),
							FirstOnDemand: pulumi.Int(0),
							LogAnalyticsInfo: &databricks.JobTaskForEachTaskTaskNewClusterAzureAttributesLogAnalyticsInfoArgs{
								LogAnalyticsPrimaryKey:  pulumi.String("string"),
								LogAnalyticsWorkspaceId: pulumi.String("string"),
							},
							SpotBidMaxPrice: pulumi.Float64(0),
						},
						ClusterId: pulumi.String("string"),
						ClusterLogConf: &databricks.JobTaskForEachTaskTaskNewClusterClusterLogConfArgs{
							Dbfs: &databricks.JobTaskForEachTaskTaskNewClusterClusterLogConfDbfsArgs{
								Destination: pulumi.String("string"),
							},
							S3: &databricks.JobTaskForEachTaskTaskNewClusterClusterLogConfS3Args{
								Destination:      pulumi.String("string"),
								CannedAcl:        pulumi.String("string"),
								EnableEncryption: pulumi.Bool(false),
								EncryptionType:   pulumi.String("string"),
								Endpoint:         pulumi.String("string"),
								KmsKey:           pulumi.String("string"),
								Region:           pulumi.String("string"),
							},
							Volumes: &databricks.JobTaskForEachTaskTaskNewClusterClusterLogConfVolumesArgs{
								Destination: pulumi.String("string"),
							},
						},
						ClusterMountInfos: databricks.JobTaskForEachTaskTaskNewClusterClusterMountInfoArray{
							&databricks.JobTaskForEachTaskTaskNewClusterClusterMountInfoArgs{
								LocalMountDirPath: pulumi.String("string"),
								NetworkFilesystemInfo: &databricks.JobTaskForEachTaskTaskNewClusterClusterMountInfoNetworkFilesystemInfoArgs{
									ServerAddress: pulumi.String("string"),
									MountOptions:  pulumi.String("string"),
								},
								RemoteMountDirPath: pulumi.String("string"),
							},
						},
						InitScripts: databricks.JobTaskForEachTaskTaskNewClusterInitScriptArray{
							&databricks.JobTaskForEachTaskTaskNewClusterInitScriptArgs{
								Abfss: &databricks.JobTaskForEachTaskTaskNewClusterInitScriptAbfssArgs{
									Destination: pulumi.String("string"),
								},
								File: &databricks.JobTaskForEachTaskTaskNewClusterInitScriptFileArgs{
									Destination: pulumi.String("string"),
								},
								Gcs: &databricks.JobTaskForEachTaskTaskNewClusterInitScriptGcsArgs{
									Destination: pulumi.String("string"),
								},
								S3: &databricks.JobTaskForEachTaskTaskNewClusterInitScriptS3Args{
									Destination:      pulumi.String("string"),
									CannedAcl:        pulumi.String("string"),
									EnableEncryption: pulumi.Bool(false),
									EncryptionType:   pulumi.String("string"),
									Endpoint:         pulumi.String("string"),
									KmsKey:           pulumi.String("string"),
									Region:           pulumi.String("string"),
								},
								Volumes: &databricks.JobTaskForEachTaskTaskNewClusterInitScriptVolumesArgs{
									Destination: pulumi.String("string"),
								},
								Workspace: &databricks.JobTaskForEachTaskTaskNewClusterInitScriptWorkspaceArgs{
									Destination: pulumi.String("string"),
								},
							},
						},
						CustomTags: pulumi.StringMap{
							"string": pulumi.String("string"),
						},
						DataSecurityMode: pulumi.String("string"),
						DockerImage: &databricks.JobTaskForEachTaskTaskNewClusterDockerImageArgs{
							Url: pulumi.String("string"),
							BasicAuth: &databricks.JobTaskForEachTaskTaskNewClusterDockerImageBasicAuthArgs{
								Password: pulumi.String("string"),
								Username: pulumi.String("string"),
							},
						},
						DriverInstancePoolId:      pulumi.String("string"),
						DriverNodeTypeId:          pulumi.String("string"),
						EnableElasticDisk:         pulumi.Bool(false),
						EnableLocalDiskEncryption: pulumi.Bool(false),
						WorkloadType: &databricks.JobTaskForEachTaskTaskNewClusterWorkloadTypeArgs{
							Clients: &databricks.JobTaskForEachTaskTaskNewClusterWorkloadTypeClientsArgs{
								Jobs:      pulumi.Bool(false),
								Notebooks: pulumi.Bool(false),
							},
						},
						AwsAttributes: &databricks.JobTaskForEachTaskTaskNewClusterAwsAttributesArgs{
							Availability:        pulumi.String("string"),
							EbsVolumeCount:      pulumi.Int(0),
							EbsVolumeIops:       pulumi.Int(0),
							EbsVolumeSize:       pulumi.Int(0),
							EbsVolumeThroughput: pulumi.Int(0),
							EbsVolumeType:       pulumi.String("string"),
							FirstOnDemand:       pulumi.Int(0),
							InstanceProfileArn:  pulumi.String("string"),
							SpotBidPricePercent: pulumi.Int(0),
							ZoneId:              pulumi.String("string"),
						},
						ClusterName:    pulumi.String("string"),
						InstancePoolId: pulumi.String("string"),
						IsSingleNode:   pulumi.Bool(false),
						Kind:           pulumi.String("string"),
						Libraries: databricks.JobTaskForEachTaskTaskNewClusterLibraryArray{
							&databricks.JobTaskForEachTaskTaskNewClusterLibraryArgs{
								Cran: &databricks.JobTaskForEachTaskTaskNewClusterLibraryCranArgs{
									Package: pulumi.String("string"),
									Repo:    pulumi.String("string"),
								},
								Egg: pulumi.String("string"),
								Jar: pulumi.String("string"),
								Maven: &databricks.JobTaskForEachTaskTaskNewClusterLibraryMavenArgs{
									Coordinates: pulumi.String("string"),
									Exclusions: pulumi.StringArray{
										pulumi.String("string"),
									},
									Repo: pulumi.String("string"),
								},
								Pypi: &databricks.JobTaskForEachTaskTaskNewClusterLibraryPypiArgs{
									Package: pulumi.String("string"),
									Repo:    pulumi.String("string"),
								},
								Requirements: pulumi.String("string"),
								Whl:          pulumi.String("string"),
							},
						},
						NodeTypeId:     pulumi.String("string"),
						NumWorkers:     pulumi.Int(0),
						PolicyId:       pulumi.String("string"),
						RuntimeEngine:  pulumi.String("string"),
						SingleUserName: pulumi.String("string"),
						SparkConf: pulumi.StringMap{
							"string": pulumi.String("string"),
						},
						SparkEnvVars: pulumi.StringMap{
							"string": pulumi.String("string"),
						},
						Autoscale: &databricks.JobTaskForEachTaskTaskNewClusterAutoscaleArgs{
							MaxWorkers: pulumi.Int(0),
							MinWorkers: pulumi.Int(0),
						},
						ApplyPolicyDefaultValues: pulumi.Bool(false),
						UseMlRuntime:             pulumi.Bool(false),
						GcpAttributes: &databricks.JobTaskForEachTaskTaskNewClusterGcpAttributesArgs{
							Availability:            pulumi.String("string"),
							BootDiskSize:            pulumi.Int(0),
							GoogleServiceAccount:    pulumi.String("string"),
							LocalSsdCount:           pulumi.Int(0),
							UsePreemptibleExecutors: pulumi.Bool(false),
							ZoneId:                  pulumi.String("string"),
						},
					},
					Description: pulumi.String("string"),
					NotebookTask: &databricks.JobTaskForEachTaskTaskNotebookTaskArgs{
						NotebookPath: pulumi.String("string"),
						BaseParameters: pulumi.StringMap{
							"string": pulumi.String("string"),
						},
						Source:      pulumi.String("string"),
						WarehouseId: pulumi.String("string"),
					},
					EmailNotifications: &databricks.JobTaskForEachTaskTaskEmailNotificationsArgs{
						NoAlertForSkippedRuns: pulumi.Bool(false),
						OnDurationWarningThresholdExceededs: pulumi.StringArray{
							pulumi.String("string"),
						},
						OnFailures: pulumi.StringArray{
							pulumi.String("string"),
						},
						OnStarts: pulumi.StringArray{
							pulumi.String("string"),
						},
						OnStreamingBacklogExceededs: pulumi.StringArray{
							pulumi.String("string"),
						},
						OnSuccesses: pulumi.StringArray{
							pulumi.String("string"),
						},
					},
					EnvironmentKey:    pulumi.String("string"),
					ExistingClusterId: pulumi.String("string"),
					GenAiComputeTask: &databricks.JobTaskForEachTaskTaskGenAiComputeTaskArgs{
						DlRuntimeImage: pulumi.String("string"),
						Command:        pulumi.String("string"),
						Compute: &databricks.JobTaskForEachTaskTaskGenAiComputeTaskComputeArgs{
							GpuNodePoolId: pulumi.String("string"),
							NumGpus:       pulumi.Int(0),
							GpuType:       pulumi.String("string"),
						},
						MlflowExperimentName:   pulumi.String("string"),
						Source:                 pulumi.String("string"),
						TrainingScriptPath:     pulumi.String("string"),
						YamlParameters:         pulumi.String("string"),
						YamlParametersFilePath: pulumi.String("string"),
					},
					Health: &databricks.JobTaskForEachTaskTaskHealthArgs{
						Rules: databricks.JobTaskForEachTaskTaskHealthRuleArray{
							&databricks.JobTaskForEachTaskTaskHealthRuleArgs{
								Metric: pulumi.String("string"),
								Op:     pulumi.String("string"),
								Value:  pulumi.Int(0),
							},
						},
					},
					JobClusterKey: pulumi.String("string"),
					NotificationSettings: &databricks.JobTaskForEachTaskTaskNotificationSettingsArgs{
						AlertOnLastAttempt:     pulumi.Bool(false),
						NoAlertForCanceledRuns: pulumi.Bool(false),
						NoAlertForSkippedRuns:  pulumi.Bool(false),
					},
					MaxRetries: pulumi.Int(0),
					CleanRoomsNotebookTask: &databricks.JobTaskForEachTaskTaskCleanRoomsNotebookTaskArgs{
						CleanRoomName: pulumi.String("string"),
						NotebookName:  pulumi.String("string"),
						Etag:          pulumi.String("string"),
						NotebookBaseParameters: pulumi.StringMap{
							"string": pulumi.String("string"),
						},
					},
					DependsOns: databricks.JobTaskForEachTaskTaskDependsOnArray{
						&databricks.JobTaskForEachTaskTaskDependsOnArgs{
							TaskKey: pulumi.String("string"),
							Outcome: pulumi.String("string"),
						},
					},
					DbtTask: &databricks.JobTaskForEachTaskTaskDbtTaskArgs{
						Commands: pulumi.StringArray{
							pulumi.String("string"),
						},
						Catalog:           pulumi.String("string"),
						ProfilesDirectory: pulumi.String("string"),
						ProjectDirectory:  pulumi.String("string"),
						Schema:            pulumi.String("string"),
						Source:            pulumi.String("string"),
						WarehouseId:       pulumi.String("string"),
					},
					Libraries: databricks.JobTaskForEachTaskTaskLibraryArray{
						&databricks.JobTaskForEachTaskTaskLibraryArgs{
							Cran: &databricks.JobTaskForEachTaskTaskLibraryCranArgs{
								Package: pulumi.String("string"),
								Repo:    pulumi.String("string"),
							},
							Egg: pulumi.String("string"),
							Jar: pulumi.String("string"),
							Maven: &databricks.JobTaskForEachTaskTaskLibraryMavenArgs{
								Coordinates: pulumi.String("string"),
								Exclusions: pulumi.StringArray{
									pulumi.String("string"),
								},
								Repo: pulumi.String("string"),
							},
							Pypi: &databricks.JobTaskForEachTaskTaskLibraryPypiArgs{
								Package: pulumi.String("string"),
								Repo:    pulumi.String("string"),
							},
							Requirements: pulumi.String("string"),
							Whl:          pulumi.String("string"),
						},
					},
					PipelineTask: &databricks.JobTaskForEachTaskTaskPipelineTaskArgs{
						PipelineId:  pulumi.String("string"),
						FullRefresh: pulumi.Bool(false),
					},
					PythonWheelTask: &databricks.JobTaskForEachTaskTaskPythonWheelTaskArgs{
						EntryPoint: pulumi.String("string"),
						NamedParameters: pulumi.StringMap{
							"string": pulumi.String("string"),
						},
						PackageName: pulumi.String("string"),
						Parameters: pulumi.StringArray{
							pulumi.String("string"),
						},
					},
					RetryOnTimeout: pulumi.Bool(false),
					RunIf:          pulumi.String("string"),
					RunJobTask: &databricks.JobTaskForEachTaskTaskRunJobTaskArgs{
						JobId: pulumi.Int(0),
						DbtCommands: pulumi.StringArray{
							pulumi.String("string"),
						},
						JarParams: pulumi.StringArray{
							pulumi.String("string"),
						},
						JobParameters: pulumi.StringMap{
							"string": pulumi.String("string"),
						},
						NotebookParams: pulumi.StringMap{
							"string": pulumi.String("string"),
						},
						PipelineParams: &databricks.JobTaskForEachTaskTaskRunJobTaskPipelineParamsArgs{
							FullRefresh: pulumi.Bool(false),
						},
						PythonNamedParams: pulumi.StringMap{
							"string": pulumi.String("string"),
						},
						PythonParams: pulumi.StringArray{
							pulumi.String("string"),
						},
						SparkSubmitParams: pulumi.StringArray{
							pulumi.String("string"),
						},
						SqlParams: pulumi.StringMap{
							"string": pulumi.String("string"),
						},
					},
					SparkJarTask: &databricks.JobTaskForEachTaskTaskSparkJarTaskArgs{
						JarUri:        pulumi.String("string"),
						MainClassName: pulumi.String("string"),
						Parameters: pulumi.StringArray{
							pulumi.String("string"),
						},
						RunAsRepl: pulumi.Bool(false),
					},
					SparkPythonTask: &databricks.JobTaskForEachTaskTaskSparkPythonTaskArgs{
						PythonFile: pulumi.String("string"),
						Parameters: pulumi.StringArray{
							pulumi.String("string"),
						},
						Source: pulumi.String("string"),
					},
					SparkSubmitTask: &databricks.JobTaskForEachTaskTaskSparkSubmitTaskArgs{
						Parameters: pulumi.StringArray{
							pulumi.String("string"),
						},
					},
					SqlTask: &databricks.JobTaskForEachTaskTaskSqlTaskArgs{
						WarehouseId: pulumi.String("string"),
						Alert: &databricks.JobTaskForEachTaskTaskSqlTaskAlertArgs{
							AlertId:            pulumi.String("string"),
							PauseSubscriptions: pulumi.Bool(false),
							Subscriptions: databricks.JobTaskForEachTaskTaskSqlTaskAlertSubscriptionArray{
								&databricks.JobTaskForEachTaskTaskSqlTaskAlertSubscriptionArgs{
									DestinationId: pulumi.String("string"),
									UserName:      pulumi.String("string"),
								},
							},
						},
						Dashboard: &databricks.JobTaskForEachTaskTaskSqlTaskDashboardArgs{
							DashboardId:        pulumi.String("string"),
							CustomSubject:      pulumi.String("string"),
							PauseSubscriptions: pulumi.Bool(false),
							Subscriptions: databricks.JobTaskForEachTaskTaskSqlTaskDashboardSubscriptionArray{
								&databricks.JobTaskForEachTaskTaskSqlTaskDashboardSubscriptionArgs{
									DestinationId: pulumi.String("string"),
									UserName:      pulumi.String("string"),
								},
							},
						},
						File: &databricks.JobTaskForEachTaskTaskSqlTaskFileArgs{
							Path:   pulumi.String("string"),
							Source: pulumi.String("string"),
						},
						Parameters: pulumi.StringMap{
							"string": pulumi.String("string"),
						},
						Query: &databricks.JobTaskForEachTaskTaskSqlTaskQueryArgs{
							QueryId: pulumi.String("string"),
						},
					},
					ConditionTask: &databricks.JobTaskForEachTaskTaskConditionTaskArgs{
						Left:  pulumi.String("string"),
						Op:    pulumi.String("string"),
						Right: pulumi.String("string"),
					},
					TimeoutSeconds: pulumi.Int(0),
					WebhookNotifications: &databricks.JobTaskForEachTaskTaskWebhookNotificationsArgs{
						OnDurationWarningThresholdExceededs: databricks.JobTaskForEachTaskTaskWebhookNotificationsOnDurationWarningThresholdExceededArray{
							&databricks.JobTaskForEachTaskTaskWebhookNotificationsOnDurationWarningThresholdExceededArgs{
								Id: pulumi.String("string"),
							},
						},
						OnFailures: databricks.JobTaskForEachTaskTaskWebhookNotificationsOnFailureArray{
							&databricks.JobTaskForEachTaskTaskWebhookNotificationsOnFailureArgs{
								Id: pulumi.String("string"),
							},
						},
						OnStarts: databricks.JobTaskForEachTaskTaskWebhookNotificationsOnStartArray{
							&databricks.JobTaskForEachTaskTaskWebhookNotificationsOnStartArgs{
								Id: pulumi.String("string"),
							},
						},
						OnStreamingBacklogExceededs: databricks.JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArray{
							&databricks.JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs{
								Id: pulumi.String("string"),
							},
						},
						OnSuccesses: databricks.JobTaskForEachTaskTaskWebhookNotificationsOnSuccessArray{
							&databricks.JobTaskForEachTaskTaskWebhookNotificationsOnSuccessArgs{
								Id: pulumi.String("string"),
							},
						},
					},
				},
				Concurrency: pulumi.Int(0),
			},
			GenAiComputeTask: &databricks.JobTaskGenAiComputeTaskArgs{
				DlRuntimeImage: pulumi.String("string"),
				Command:        pulumi.String("string"),
				Compute: &databricks.JobTaskGenAiComputeTaskComputeArgs{
					GpuNodePoolId: pulumi.String("string"),
					NumGpus:       pulumi.Int(0),
					GpuType:       pulumi.String("string"),
				},
				MlflowExperimentName:   pulumi.String("string"),
				Source:                 pulumi.String("string"),
				TrainingScriptPath:     pulumi.String("string"),
				YamlParameters:         pulumi.String("string"),
				YamlParametersFilePath: pulumi.String("string"),
			},
			Health: &databricks.JobTaskHealthArgs{
				Rules: databricks.JobTaskHealthRuleArray{
					&databricks.JobTaskHealthRuleArgs{
						Metric: pulumi.String("string"),
						Op:     pulumi.String("string"),
						Value:  pulumi.Int(0),
					},
				},
			},
			JobClusterKey: pulumi.String("string"),
			Libraries: databricks.JobTaskLibraryArray{
				&databricks.JobTaskLibraryArgs{
					Cran: &databricks.JobTaskLibraryCranArgs{
						Package: pulumi.String("string"),
						Repo:    pulumi.String("string"),
					},
					Egg: pulumi.String("string"),
					Jar: pulumi.String("string"),
					Maven: &databricks.JobTaskLibraryMavenArgs{
						Coordinates: pulumi.String("string"),
						Exclusions: pulumi.StringArray{
							pulumi.String("string"),
						},
						Repo: pulumi.String("string"),
					},
					Pypi: &databricks.JobTaskLibraryPypiArgs{
						Package: pulumi.String("string"),
						Repo:    pulumi.String("string"),
					},
					Requirements: pulumi.String("string"),
					Whl:          pulumi.String("string"),
				},
			},
			CleanRoomsNotebookTask: &databricks.JobTaskCleanRoomsNotebookTaskArgs{
				CleanRoomName: pulumi.String("string"),
				NotebookName:  pulumi.String("string"),
				Etag:          pulumi.String("string"),
				NotebookBaseParameters: pulumi.StringMap{
					"string": pulumi.String("string"),
				},
			},
			WebhookNotifications: &databricks.JobTaskWebhookNotificationsArgs{
				OnDurationWarningThresholdExceededs: databricks.JobTaskWebhookNotificationsOnDurationWarningThresholdExceededArray{
					&databricks.JobTaskWebhookNotificationsOnDurationWarningThresholdExceededArgs{
						Id: pulumi.String("string"),
					},
				},
				OnFailures: databricks.JobTaskWebhookNotificationsOnFailureArray{
					&databricks.JobTaskWebhookNotificationsOnFailureArgs{
						Id: pulumi.String("string"),
					},
				},
				OnStarts: databricks.JobTaskWebhookNotificationsOnStartArray{
					&databricks.JobTaskWebhookNotificationsOnStartArgs{
						Id: pulumi.String("string"),
					},
				},
				OnStreamingBacklogExceededs: databricks.JobTaskWebhookNotificationsOnStreamingBacklogExceededArray{
					&databricks.JobTaskWebhookNotificationsOnStreamingBacklogExceededArgs{
						Id: pulumi.String("string"),
					},
				},
				OnSuccesses: databricks.JobTaskWebhookNotificationsOnSuccessArray{
					&databricks.JobTaskWebhookNotificationsOnSuccessArgs{
						Id: pulumi.String("string"),
					},
				},
			},
			DbtTask: &databricks.JobTaskDbtTaskArgs{
				Commands: pulumi.StringArray{
					pulumi.String("string"),
				},
				Catalog:           pulumi.String("string"),
				ProfilesDirectory: pulumi.String("string"),
				ProjectDirectory:  pulumi.String("string"),
				Schema:            pulumi.String("string"),
				Source:            pulumi.String("string"),
				WarehouseId:       pulumi.String("string"),
			},
			NotebookTask: &databricks.JobTaskNotebookTaskArgs{
				NotebookPath: pulumi.String("string"),
				BaseParameters: pulumi.StringMap{
					"string": pulumi.String("string"),
				},
				Source:      pulumi.String("string"),
				WarehouseId: pulumi.String("string"),
			},
			NotificationSettings: &databricks.JobTaskNotificationSettingsArgs{
				AlertOnLastAttempt:     pulumi.Bool(false),
				NoAlertForCanceledRuns: pulumi.Bool(false),
				NoAlertForSkippedRuns:  pulumi.Bool(false),
			},
			PipelineTask: &databricks.JobTaskPipelineTaskArgs{
				PipelineId:  pulumi.String("string"),
				FullRefresh: pulumi.Bool(false),
			},
			PythonWheelTask: &databricks.JobTaskPythonWheelTaskArgs{
				EntryPoint: pulumi.String("string"),
				NamedParameters: pulumi.StringMap{
					"string": pulumi.String("string"),
				},
				PackageName: pulumi.String("string"),
				Parameters: pulumi.StringArray{
					pulumi.String("string"),
				},
			},
			RetryOnTimeout: pulumi.Bool(false),
			RunIf:          pulumi.String("string"),
			RunJobTask: &databricks.JobTaskRunJobTaskArgs{
				JobId: pulumi.Int(0),
				DbtCommands: pulumi.StringArray{
					pulumi.String("string"),
				},
				JarParams: pulumi.StringArray{
					pulumi.String("string"),
				},
				JobParameters: pulumi.StringMap{
					"string": pulumi.String("string"),
				},
				NotebookParams: pulumi.StringMap{
					"string": pulumi.String("string"),
				},
				PipelineParams: &databricks.JobTaskRunJobTaskPipelineParamsArgs{
					FullRefresh: pulumi.Bool(false),
				},
				PythonNamedParams: pulumi.StringMap{
					"string": pulumi.String("string"),
				},
				PythonParams: pulumi.StringArray{
					pulumi.String("string"),
				},
				SparkSubmitParams: pulumi.StringArray{
					pulumi.String("string"),
				},
				SqlParams: pulumi.StringMap{
					"string": pulumi.String("string"),
				},
			},
			SparkJarTask: &databricks.JobTaskSparkJarTaskArgs{
				JarUri:        pulumi.String("string"),
				MainClassName: pulumi.String("string"),
				Parameters: pulumi.StringArray{
					pulumi.String("string"),
				},
				RunAsRepl: pulumi.Bool(false),
			},
			SparkPythonTask: &databricks.JobTaskSparkPythonTaskArgs{
				PythonFile: pulumi.String("string"),
				Parameters: pulumi.StringArray{
					pulumi.String("string"),
				},
				Source: pulumi.String("string"),
			},
			SparkSubmitTask: &databricks.JobTaskSparkSubmitTaskArgs{
				Parameters: pulumi.StringArray{
					pulumi.String("string"),
				},
			},
			SqlTask: &databricks.JobTaskSqlTaskArgs{
				WarehouseId: pulumi.String("string"),
				Alert: &databricks.JobTaskSqlTaskAlertArgs{
					AlertId:            pulumi.String("string"),
					PauseSubscriptions: pulumi.Bool(false),
					Subscriptions: databricks.JobTaskSqlTaskAlertSubscriptionArray{
						&databricks.JobTaskSqlTaskAlertSubscriptionArgs{
							DestinationId: pulumi.String("string"),
							UserName:      pulumi.String("string"),
						},
					},
				},
				Dashboard: &databricks.JobTaskSqlTaskDashboardArgs{
					DashboardId:        pulumi.String("string"),
					CustomSubject:      pulumi.String("string"),
					PauseSubscriptions: pulumi.Bool(false),
					Subscriptions: databricks.JobTaskSqlTaskDashboardSubscriptionArray{
						&databricks.JobTaskSqlTaskDashboardSubscriptionArgs{
							DestinationId: pulumi.String("string"),
							UserName:      pulumi.String("string"),
						},
					},
				},
				File: &databricks.JobTaskSqlTaskFileArgs{
					Path:   pulumi.String("string"),
					Source: pulumi.String("string"),
				},
				Parameters: pulumi.StringMap{
					"string": pulumi.String("string"),
				},
				Query: &databricks.JobTaskSqlTaskQueryArgs{
					QueryId: pulumi.String("string"),
				},
			},
			ConditionTask: &databricks.JobTaskConditionTaskArgs{
				Left:  pulumi.String("string"),
				Op:    pulumi.String("string"),
				Right: pulumi.String("string"),
			},
			TimeoutSeconds:         pulumi.Int(0),
			MinRetryIntervalMillis: pulumi.Int(0),
		},
	},
	TimeoutSeconds: pulumi.Int(0),
	Trigger: &databricks.JobTriggerArgs{
		FileArrival: &databricks.JobTriggerFileArrivalArgs{
			Url:                           pulumi.String("string"),
			MinTimeBetweenTriggersSeconds: pulumi.Int(0),
			WaitAfterLastChangeSeconds:    pulumi.Int(0),
		},
		PauseStatus: pulumi.String("string"),
		Periodic: &databricks.JobTriggerPeriodicArgs{
			Interval: pulumi.Int(0),
			Unit:     pulumi.String("string"),
		},
		Table: &databricks.JobTriggerTableArgs{
			Condition:                     pulumi.String("string"),
			MinTimeBetweenTriggersSeconds: pulumi.Int(0),
			TableNames: pulumi.StringArray{
				pulumi.String("string"),
			},
			WaitAfterLastChangeSeconds: pulumi.Int(0),
		},
		TableUpdate: &databricks.JobTriggerTableUpdateArgs{
			TableNames: pulumi.StringArray{
				pulumi.String("string"),
			},
			Condition:                     pulumi.String("string"),
			MinTimeBetweenTriggersSeconds: pulumi.Int(0),
			WaitAfterLastChangeSeconds:    pulumi.Int(0),
		},
	},
	WebhookNotifications: &databricks.JobWebhookNotificationsArgs{
		OnDurationWarningThresholdExceededs: databricks.JobWebhookNotificationsOnDurationWarningThresholdExceededArray{
			&databricks.JobWebhookNotificationsOnDurationWarningThresholdExceededArgs{
				Id: pulumi.String("string"),
			},
		},
		OnFailures: databricks.JobWebhookNotificationsOnFailureArray{
			&databricks.JobWebhookNotificationsOnFailureArgs{
				Id: pulumi.String("string"),
			},
		},
		OnStarts: databricks.JobWebhookNotificationsOnStartArray{
			&databricks.JobWebhookNotificationsOnStartArgs{
				Id: pulumi.String("string"),
			},
		},
		OnStreamingBacklogExceededs: databricks.JobWebhookNotificationsOnStreamingBacklogExceededArray{
			&databricks.JobWebhookNotificationsOnStreamingBacklogExceededArgs{
				Id: pulumi.String("string"),
			},
		},
		OnSuccesses: databricks.JobWebhookNotificationsOnSuccessArray{
			&databricks.JobWebhookNotificationsOnSuccessArgs{
				Id: pulumi.String("string"),
			},
		},
	},
})
Copy
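The exhaustive constructor listings on this page enumerate every supported property with placeholder values; a real job definition usually sets only a small subset of them. As a rough orientation, the following minimal Go sketch defines a single notebook task running on an existing interactive cluster. The notebook path and cluster ID are hypothetical placeholders, not values taken from this page.

package main

import (
	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Minimal sketch: one notebook task attached to an existing cluster.
		// Both string values below are illustrative placeholders.
		_, err := databricks.NewJob(ctx, "minimal", &databricks.JobArgs{
			Name: pulumi.String("Minimal notebook job"),
			Tasks: databricks.JobTaskArray{
				&databricks.JobTaskArgs{
					TaskKey:           pulumi.String("a"),
					ExistingClusterId: pulumi.String("0123-456789-example"),
					NotebookTask: &databricks.JobTaskNotebookTaskArgs{
						NotebookPath: pulumi.String("/Shared/example"),
					},
				},
			},
		})
		return err
	})
}

Any of the optional blocks shown in the full listings (for example webhookNotifications, trigger, or health) can be layered onto this shape as needed.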
var jobResource = new Job("jobResource", JobArgs.builder()
    .budgetPolicyId("string")
    .continuous(JobContinuousArgs.builder()
        .pauseStatus("string")
        .build())
    .controlRunState(false)
    .deployment(JobDeploymentArgs.builder()
        .kind("string")
        .metadataFilePath("string")
        .build())
    .description("string")
    .editMode("string")
    .emailNotifications(JobEmailNotificationsArgs.builder()
        .noAlertForSkippedRuns(false)
        .onDurationWarningThresholdExceededs("string")
        .onFailures("string")
        .onStarts("string")
        .onStreamingBacklogExceededs("string")
        .onSuccesses("string")
        .build())
    .environments(JobEnvironmentArgs.builder()
        .environmentKey("string")
        .spec(JobEnvironmentSpecArgs.builder()
            .client("string")
            .dependencies("string")
            .build())
        .build())
    .existingClusterId("string")
    .format("string")
    .gitSource(JobGitSourceArgs.builder()
        .url("string")
        .branch("string")
        .commit("string")
        .gitSnapshot(JobGitSourceGitSnapshotArgs.builder()
            .usedCommit("string")
            .build())
        .jobSource(JobGitSourceJobSourceArgs.builder()
            .importFromGitBranch("string")
            .jobConfigPath("string")
            .dirtyState("string")
            .build())
        .provider("string")
        .tag("string")
        .build())
    .health(JobHealthArgs.builder()
        .rules(JobHealthRuleArgs.builder()
            .metric("string")
            .op("string")
            .value(0)
            .build())
        .build())
    .jobClusters(JobJobClusterArgs.builder()
        .jobClusterKey("string")
        .newCluster(JobJobClusterNewClusterArgs.builder()
            .sparkVersion("string")
            .idempotencyToken("string")
            .sshPublicKeys("string")
            .azureAttributes(JobJobClusterNewClusterAzureAttributesArgs.builder()
                .availability("string")
                .firstOnDemand(0)
                .logAnalyticsInfo(JobJobClusterNewClusterAzureAttributesLogAnalyticsInfoArgs.builder()
                    .logAnalyticsPrimaryKey("string")
                    .logAnalyticsWorkspaceId("string")
                    .build())
                .spotBidMaxPrice(0)
                .build())
            .clusterId("string")
            .clusterLogConf(JobJobClusterNewClusterClusterLogConfArgs.builder()
                .dbfs(JobJobClusterNewClusterClusterLogConfDbfsArgs.builder()
                    .destination("string")
                    .build())
                .s3(JobJobClusterNewClusterClusterLogConfS3Args.builder()
                    .destination("string")
                    .cannedAcl("string")
                    .enableEncryption(false)
                    .encryptionType("string")
                    .endpoint("string")
                    .kmsKey("string")
                    .region("string")
                    .build())
                .volumes(JobJobClusterNewClusterClusterLogConfVolumesArgs.builder()
                    .destination("string")
                    .build())
                .build())
            .clusterMountInfos(JobJobClusterNewClusterClusterMountInfoArgs.builder()
                .localMountDirPath("string")
                .networkFilesystemInfo(JobJobClusterNewClusterClusterMountInfoNetworkFilesystemInfoArgs.builder()
                    .serverAddress("string")
                    .mountOptions("string")
                    .build())
                .remoteMountDirPath("string")
                .build())
            .initScripts(JobJobClusterNewClusterInitScriptArgs.builder()
                .abfss(JobJobClusterNewClusterInitScriptAbfssArgs.builder()
                    .destination("string")
                    .build())
                .file(JobJobClusterNewClusterInitScriptFileArgs.builder()
                    .destination("string")
                    .build())
                .gcs(JobJobClusterNewClusterInitScriptGcsArgs.builder()
                    .destination("string")
                    .build())
                .s3(JobJobClusterNewClusterInitScriptS3Args.builder()
                    .destination("string")
                    .cannedAcl("string")
                    .enableEncryption(false)
                    .encryptionType("string")
                    .endpoint("string")
                    .kmsKey("string")
                    .region("string")
                    .build())
                .volumes(JobJobClusterNewClusterInitScriptVolumesArgs.builder()
                    .destination("string")
                    .build())
                .workspace(JobJobClusterNewClusterInitScriptWorkspaceArgs.builder()
                    .destination("string")
                    .build())
                .build())
            .customTags(Map.of("string", "string"))
            .dataSecurityMode("string")
            .dockerImage(JobJobClusterNewClusterDockerImageArgs.builder()
                .url("string")
                .basicAuth(JobJobClusterNewClusterDockerImageBasicAuthArgs.builder()
                    .password("string")
                    .username("string")
                    .build())
                .build())
            .driverInstancePoolId("string")
            .driverNodeTypeId("string")
            .enableElasticDisk(false)
            .enableLocalDiskEncryption(false)
            .workloadType(JobJobClusterNewClusterWorkloadTypeArgs.builder()
                .clients(JobJobClusterNewClusterWorkloadTypeClientsArgs.builder()
                    .jobs(false)
                    .notebooks(false)
                    .build())
                .build())
            .awsAttributes(JobJobClusterNewClusterAwsAttributesArgs.builder()
                .availability("string")
                .ebsVolumeCount(0)
                .ebsVolumeIops(0)
                .ebsVolumeSize(0)
                .ebsVolumeThroughput(0)
                .ebsVolumeType("string")
                .firstOnDemand(0)
                .instanceProfileArn("string")
                .spotBidPricePercent(0)
                .zoneId("string")
                .build())
            .clusterName("string")
            .instancePoolId("string")
            .isSingleNode(false)
            .kind("string")
            .libraries(JobJobClusterNewClusterLibraryArgs.builder()
                .cran(JobJobClusterNewClusterLibraryCranArgs.builder()
                    .package_("string")
                    .repo("string")
                    .build())
                .egg("string")
                .jar("string")
                .maven(JobJobClusterNewClusterLibraryMavenArgs.builder()
                    .coordinates("string")
                    .exclusions("string")
                    .repo("string")
                    .build())
                .pypi(JobJobClusterNewClusterLibraryPypiArgs.builder()
                    .package_("string")
                    .repo("string")
                    .build())
                .requirements("string")
                .whl("string")
                .build())
            .nodeTypeId("string")
            .numWorkers(0)
            .policyId("string")
            .runtimeEngine("string")
            .singleUserName("string")
            .sparkConf(Map.of("string", "string"))
            .sparkEnvVars(Map.of("string", "string"))
            .autoscale(JobJobClusterNewClusterAutoscaleArgs.builder()
                .maxWorkers(0)
                .minWorkers(0)
                .build())
            .applyPolicyDefaultValues(false)
            .useMlRuntime(false)
            .gcpAttributes(JobJobClusterNewClusterGcpAttributesArgs.builder()
                .availability("string")
                .bootDiskSize(0)
                .googleServiceAccount("string")
                .localSsdCount(0)
                .usePreemptibleExecutors(false)
                .zoneId("string")
                .build())
            .build())
        .build())
    .libraries(JobLibraryArgs.builder()
        .cran(JobLibraryCranArgs.builder()
            .package_("string")
            .repo("string")
            .build())
        .egg("string")
        .jar("string")
        .maven(JobLibraryMavenArgs.builder()
            .coordinates("string")
            .exclusions("string")
            .repo("string")
            .build())
        .pypi(JobLibraryPypiArgs.builder()
            .package_("string")
            .repo("string")
            .build())
        .requirements("string")
        .whl("string")
        .build())
    .maxConcurrentRuns(0)
    .name("string")
    .newCluster(JobNewClusterArgs.builder()
        .sparkVersion("string")
        .idempotencyToken("string")
        .sshPublicKeys("string")
        .azureAttributes(JobNewClusterAzureAttributesArgs.builder()
            .availability("string")
            .firstOnDemand(0)
            .logAnalyticsInfo(JobNewClusterAzureAttributesLogAnalyticsInfoArgs.builder()
                .logAnalyticsPrimaryKey("string")
                .logAnalyticsWorkspaceId("string")
                .build())
            .spotBidMaxPrice(0)
            .build())
        .clusterId("string")
        .clusterLogConf(JobNewClusterClusterLogConfArgs.builder()
            .dbfs(JobNewClusterClusterLogConfDbfsArgs.builder()
                .destination("string")
                .build())
            .s3(JobNewClusterClusterLogConfS3Args.builder()
                .destination("string")
                .cannedAcl("string")
                .enableEncryption(false)
                .encryptionType("string")
                .endpoint("string")
                .kmsKey("string")
                .region("string")
                .build())
            .volumes(JobNewClusterClusterLogConfVolumesArgs.builder()
                .destination("string")
                .build())
            .build())
        .clusterMountInfos(JobNewClusterClusterMountInfoArgs.builder()
            .localMountDirPath("string")
            .networkFilesystemInfo(JobNewClusterClusterMountInfoNetworkFilesystemInfoArgs.builder()
                .serverAddress("string")
                .mountOptions("string")
                .build())
            .remoteMountDirPath("string")
            .build())
        .initScripts(JobNewClusterInitScriptArgs.builder()
            .abfss(JobNewClusterInitScriptAbfssArgs.builder()
                .destination("string")
                .build())
            .file(JobNewClusterInitScriptFileArgs.builder()
                .destination("string")
                .build())
            .gcs(JobNewClusterInitScriptGcsArgs.builder()
                .destination("string")
                .build())
            .s3(JobNewClusterInitScriptS3Args.builder()
                .destination("string")
                .cannedAcl("string")
                .enableEncryption(false)
                .encryptionType("string")
                .endpoint("string")
                .kmsKey("string")
                .region("string")
                .build())
            .volumes(JobNewClusterInitScriptVolumesArgs.builder()
                .destination("string")
                .build())
            .workspace(JobNewClusterInitScriptWorkspaceArgs.builder()
                .destination("string")
                .build())
            .build())
        .customTags(Map.of("string", "string"))
        .dataSecurityMode("string")
        .dockerImage(JobNewClusterDockerImageArgs.builder()
            .url("string")
            .basicAuth(JobNewClusterDockerImageBasicAuthArgs.builder()
                .password("string")
                .username("string")
                .build())
            .build())
        .driverInstancePoolId("string")
        .driverNodeTypeId("string")
        .enableElasticDisk(false)
        .enableLocalDiskEncryption(false)
        .workloadType(JobNewClusterWorkloadTypeArgs.builder()
            .clients(JobNewClusterWorkloadTypeClientsArgs.builder()
                .jobs(false)
                .notebooks(false)
                .build())
            .build())
        .awsAttributes(JobNewClusterAwsAttributesArgs.builder()
            .availability("string")
            .ebsVolumeCount(0)
            .ebsVolumeIops(0)
            .ebsVolumeSize(0)
            .ebsVolumeThroughput(0)
            .ebsVolumeType("string")
            .firstOnDemand(0)
            .instanceProfileArn("string")
            .spotBidPricePercent(0)
            .zoneId("string")
            .build())
        .clusterName("string")
        .instancePoolId("string")
        .isSingleNode(false)
        .kind("string")
        .libraries(JobNewClusterLibraryArgs.builder()
            .cran(JobNewClusterLibraryCranArgs.builder()
                .package_("string")
                .repo("string")
                .build())
            .egg("string")
            .jar("string")
            .maven(JobNewClusterLibraryMavenArgs.builder()
                .coordinates("string")
                .exclusions("string")
                .repo("string")
                .build())
            .pypi(JobNewClusterLibraryPypiArgs.builder()
                .package_("string")
                .repo("string")
                .build())
            .requirements("string")
            .whl("string")
            .build())
        .nodeTypeId("string")
        .numWorkers(0)
        .policyId("string")
        .runtimeEngine("string")
        .singleUserName("string")
        .sparkConf(Map.of("string", "string"))
        .sparkEnvVars(Map.of("string", "string"))
        .autoscale(JobNewClusterAutoscaleArgs.builder()
            .maxWorkers(0)
            .minWorkers(0)
            .build())
        .applyPolicyDefaultValues(false)
        .useMlRuntime(false)
        .gcpAttributes(JobNewClusterGcpAttributesArgs.builder()
            .availability("string")
            .bootDiskSize(0)
            .googleServiceAccount("string")
            .localSsdCount(0)
            .usePreemptibleExecutors(false)
            .zoneId("string")
            .build())
        .build())
    .notificationSettings(JobNotificationSettingsArgs.builder()
        .noAlertForCanceledRuns(false)
        .noAlertForSkippedRuns(false)
        .build())
    .parameters(JobParameterArgs.builder()
        .default_("string")
        .name("string")
        .build())
    .performanceTarget("string")
    .queue(JobQueueArgs.builder()
        .enabled(false)
        .build())
    .runAs(JobRunAsArgs.builder()
        .servicePrincipalName("string")
        .userName("string")
        .build())
    .schedule(JobScheduleArgs.builder()
        .quartzCronExpression("string")
        .timezoneId("string")
        .pauseStatus("string")
        .build())
    .tags(Map.of("string", "string"))
    .tasks(JobTaskArgs.builder()
        .taskKey("string")
        .maxRetries(0)
        .dependsOns(JobTaskDependsOnArgs.builder()
            .taskKey("string")
            .outcome("string")
            .build())
        .newCluster(JobTaskNewClusterArgs.builder()
            .sparkVersion("string")
            .idempotencyToken("string")
            .sshPublicKeys("string")
            .azureAttributes(JobTaskNewClusterAzureAttributesArgs.builder()
                .availability("string")
                .firstOnDemand(0)
                .logAnalyticsInfo(JobTaskNewClusterAzureAttributesLogAnalyticsInfoArgs.builder()
                    .logAnalyticsPrimaryKey("string")
                    .logAnalyticsWorkspaceId("string")
                    .build())
                .spotBidMaxPrice(0)
                .build())
            .clusterId("string")
            .clusterLogConf(JobTaskNewClusterClusterLogConfArgs.builder()
                .dbfs(JobTaskNewClusterClusterLogConfDbfsArgs.builder()
                    .destination("string")
                    .build())
                .s3(JobTaskNewClusterClusterLogConfS3Args.builder()
                    .destination("string")
                    .cannedAcl("string")
                    .enableEncryption(false)
                    .encryptionType("string")
                    .endpoint("string")
                    .kmsKey("string")
                    .region("string")
                    .build())
                .volumes(JobTaskNewClusterClusterLogConfVolumesArgs.builder()
                    .destination("string")
                    .build())
                .build())
            .clusterMountInfos(JobTaskNewClusterClusterMountInfoArgs.builder()
                .localMountDirPath("string")
                .networkFilesystemInfo(JobTaskNewClusterClusterMountInfoNetworkFilesystemInfoArgs.builder()
                    .serverAddress("string")
                    .mountOptions("string")
                    .build())
                .remoteMountDirPath("string")
                .build())
            .initScripts(JobTaskNewClusterInitScriptArgs.builder()
                .abfss(JobTaskNewClusterInitScriptAbfssArgs.builder()
                    .destination("string")
                    .build())
                .file(JobTaskNewClusterInitScriptFileArgs.builder()
                    .destination("string")
                    .build())
                .gcs(JobTaskNewClusterInitScriptGcsArgs.builder()
                    .destination("string")
                    .build())
                .s3(JobTaskNewClusterInitScriptS3Args.builder()
                    .destination("string")
                    .cannedAcl("string")
                    .enableEncryption(false)
                    .encryptionType("string")
                    .endpoint("string")
                    .kmsKey("string")
                    .region("string")
                    .build())
                .volumes(JobTaskNewClusterInitScriptVolumesArgs.builder()
                    .destination("string")
                    .build())
                .workspace(JobTaskNewClusterInitScriptWorkspaceArgs.builder()
                    .destination("string")
                    .build())
                .build())
            .customTags(Map.of("string", "string"))
            .dataSecurityMode("string")
            .dockerImage(JobTaskNewClusterDockerImageArgs.builder()
                .url("string")
                .basicAuth(JobTaskNewClusterDockerImageBasicAuthArgs.builder()
                    .password("string")
                    .username("string")
                    .build())
                .build())
            .driverInstancePoolId("string")
            .driverNodeTypeId("string")
            .enableElasticDisk(false)
            .enableLocalDiskEncryption(false)
            .workloadType(JobTaskNewClusterWorkloadTypeArgs.builder()
                .clients(JobTaskNewClusterWorkloadTypeClientsArgs.builder()
                    .jobs(false)
                    .notebooks(false)
                    .build())
                .build())
            .awsAttributes(JobTaskNewClusterAwsAttributesArgs.builder()
                .availability("string")
                .ebsVolumeCount(0)
                .ebsVolumeIops(0)
                .ebsVolumeSize(0)
                .ebsVolumeThroughput(0)
                .ebsVolumeType("string")
                .firstOnDemand(0)
                .instanceProfileArn("string")
                .spotBidPricePercent(0)
                .zoneId("string")
                .build())
            .clusterName("string")
            .instancePoolId("string")
            .isSingleNode(false)
            .kind("string")
            .libraries(JobTaskNewClusterLibraryArgs.builder()
                .cran(JobTaskNewClusterLibraryCranArgs.builder()
                    .package_("string")
                    .repo("string")
                    .build())
                .egg("string")
                .jar("string")
                .maven(JobTaskNewClusterLibraryMavenArgs.builder()
                    .coordinates("string")
                    .exclusions("string")
                    .repo("string")
                    .build())
                .pypi(JobTaskNewClusterLibraryPypiArgs.builder()
                    .package_("string")
                    .repo("string")
                    .build())
                .requirements("string")
                .whl("string")
                .build())
            .nodeTypeId("string")
            .numWorkers(0)
            .policyId("string")
            .runtimeEngine("string")
            .singleUserName("string")
            .sparkConf(Map.of("string", "string"))
            .sparkEnvVars(Map.of("string", "string"))
            .autoscale(JobTaskNewClusterAutoscaleArgs.builder()
                .maxWorkers(0)
                .minWorkers(0)
                .build())
            .applyPolicyDefaultValues(false)
            .useMlRuntime(false)
            .gcpAttributes(JobTaskNewClusterGcpAttributesArgs.builder()
                .availability("string")
                .bootDiskSize(0)
                .googleServiceAccount("string")
                .localSsdCount(0)
                .usePreemptibleExecutors(false)
                .zoneId("string")
                .build())
            .build())
        .description("string")
        .disableAutoOptimization(false)
        .emailNotifications(JobTaskEmailNotificationsArgs.builder()
            .noAlertForSkippedRuns(false)
            .onDurationWarningThresholdExceededs("string")
            .onFailures("string")
            .onStarts("string")
            .onStreamingBacklogExceededs("string")
            .onSuccesses("string")
            .build())
        .environmentKey("string")
        .existingClusterId("string")
        .forEachTask(JobTaskForEachTaskArgs.builder()
            .inputs("string")
            .task(JobTaskForEachTaskTaskArgs.builder()
                .taskKey("string")
                .minRetryIntervalMillis(0)
                .disableAutoOptimization(false)
                .newCluster(JobTaskForEachTaskTaskNewClusterArgs.builder()
                    .sparkVersion("string")
                    .idempotencyToken("string")
                    .sshPublicKeys("string")
                    .azureAttributes(JobTaskForEachTaskTaskNewClusterAzureAttributesArgs.builder()
                        .availability("string")
                        .firstOnDemand(0)
                        .logAnalyticsInfo(JobTaskForEachTaskTaskNewClusterAzureAttributesLogAnalyticsInfoArgs.builder()
                            .logAnalyticsPrimaryKey("string")
                            .logAnalyticsWorkspaceId("string")
                            .build())
                        .spotBidMaxPrice(0)
                        .build())
                    .clusterId("string")
                    .clusterLogConf(JobTaskForEachTaskTaskNewClusterClusterLogConfArgs.builder()
                        .dbfs(JobTaskForEachTaskTaskNewClusterClusterLogConfDbfsArgs.builder()
                            .destination("string")
                            .build())
                        .s3(JobTaskForEachTaskTaskNewClusterClusterLogConfS3Args.builder()
                            .destination("string")
                            .cannedAcl("string")
                            .enableEncryption(false)
                            .encryptionType("string")
                            .endpoint("string")
                            .kmsKey("string")
                            .region("string")
                            .build())
                        .volumes(JobTaskForEachTaskTaskNewClusterClusterLogConfVolumesArgs.builder()
                            .destination("string")
                            .build())
                        .build())
                    .clusterMountInfos(JobTaskForEachTaskTaskNewClusterClusterMountInfoArgs.builder()
                        .localMountDirPath("string")
                        .networkFilesystemInfo(JobTaskForEachTaskTaskNewClusterClusterMountInfoNetworkFilesystemInfoArgs.builder()
                            .serverAddress("string")
                            .mountOptions("string")
                            .build())
                        .remoteMountDirPath("string")
                        .build())
                    .initScripts(JobTaskForEachTaskTaskNewClusterInitScriptArgs.builder()
                        .abfss(JobTaskForEachTaskTaskNewClusterInitScriptAbfssArgs.builder()
                            .destination("string")
                            .build())
                        .file(JobTaskForEachTaskTaskNewClusterInitScriptFileArgs.builder()
                            .destination("string")
                            .build())
                        .gcs(JobTaskForEachTaskTaskNewClusterInitScriptGcsArgs.builder()
                            .destination("string")
                            .build())
                        .s3(JobTaskForEachTaskTaskNewClusterInitScriptS3Args.builder()
                            .destination("string")
                            .cannedAcl("string")
                            .enableEncryption(false)
                            .encryptionType("string")
                            .endpoint("string")
                            .kmsKey("string")
                            .region("string")
                            .build())
                        .volumes(JobTaskForEachTaskTaskNewClusterInitScriptVolumesArgs.builder()
                            .destination("string")
                            .build())
                        .workspace(JobTaskForEachTaskTaskNewClusterInitScriptWorkspaceArgs.builder()
                            .destination("string")
                            .build())
                        .build())
                    .customTags(Map.of("string", "string"))
                    .dataSecurityMode("string")
                    .dockerImage(JobTaskForEachTaskTaskNewClusterDockerImageArgs.builder()
                        .url("string")
                        .basicAuth(JobTaskForEachTaskTaskNewClusterDockerImageBasicAuthArgs.builder()
                            .password("string")
                            .username("string")
                            .build())
                        .build())
                    .driverInstancePoolId("string")
                    .driverNodeTypeId("string")
                    .enableElasticDisk(false)
                    .enableLocalDiskEncryption(false)
                    .workloadType(JobTaskForEachTaskTaskNewClusterWorkloadTypeArgs.builder()
                        .clients(JobTaskForEachTaskTaskNewClusterWorkloadTypeClientsArgs.builder()
                            .jobs(false)
                            .notebooks(false)
                            .build())
                        .build())
                    .awsAttributes(JobTaskForEachTaskTaskNewClusterAwsAttributesArgs.builder()
                        .availability("string")
                        .ebsVolumeCount(0)
                        .ebsVolumeIops(0)
                        .ebsVolumeSize(0)
                        .ebsVolumeThroughput(0)
                        .ebsVolumeType("string")
                        .firstOnDemand(0)
                        .instanceProfileArn("string")
                        .spotBidPricePercent(0)
                        .zoneId("string")
                        .build())
                    .clusterName("string")
                    .instancePoolId("string")
                    .isSingleNode(false)
                    .kind("string")
                    .libraries(JobTaskForEachTaskTaskNewClusterLibraryArgs.builder()
                        .cran(JobTaskForEachTaskTaskNewClusterLibraryCranArgs.builder()
                            .package_("string")
                            .repo("string")
                            .build())
                        .egg("string")
                        .jar("string")
                        .maven(JobTaskForEachTaskTaskNewClusterLibraryMavenArgs.builder()
                            .coordinates("string")
                            .exclusions("string")
                            .repo("string")
                            .build())
                        .pypi(JobTaskForEachTaskTaskNewClusterLibraryPypiArgs.builder()
                            .package_("string")
                            .repo("string")
                            .build())
                        .requirements("string")
                        .whl("string")
                        .build())
                    .nodeTypeId("string")
                    .numWorkers(0)
                    .policyId("string")
                    .runtimeEngine("string")
                    .singleUserName("string")
                    .sparkConf(Map.of("string", "string"))
                    .sparkEnvVars(Map.of("string", "string"))
                    .autoscale(JobTaskForEachTaskTaskNewClusterAutoscaleArgs.builder()
                        .maxWorkers(0)
                        .minWorkers(0)
                        .build())
                    .applyPolicyDefaultValues(false)
                    .useMlRuntime(false)
                    .gcpAttributes(JobTaskForEachTaskTaskNewClusterGcpAttributesArgs.builder()
                        .availability("string")
                        .bootDiskSize(0)
                        .googleServiceAccount("string")
                        .localSsdCount(0)
                        .usePreemptibleExecutors(false)
                        .zoneId("string")
                        .build())
                    .build())
                .description("string")
                .notebookTask(JobTaskForEachTaskTaskNotebookTaskArgs.builder()
                    .notebookPath("string")
                    .baseParameters(Map.of("string", "string"))
                    .source("string")
                    .warehouseId("string")
                    .build())
                .emailNotifications(JobTaskForEachTaskTaskEmailNotificationsArgs.builder()
                    .noAlertForSkippedRuns(false)
                    .onDurationWarningThresholdExceededs("string")
                    .onFailures("string")
                    .onStarts("string")
                    .onStreamingBacklogExceededs("string")
                    .onSuccesses("string")
                    .build())
                .environmentKey("string")
                .existingClusterId("string")
                .genAiComputeTask(JobTaskForEachTaskTaskGenAiComputeTaskArgs.builder()
                    .dlRuntimeImage("string")
                    .command("string")
                    .compute(JobTaskForEachTaskTaskGenAiComputeTaskComputeArgs.builder()
                        .gpuNodePoolId("string")
                        .numGpus(0)
                        .gpuType("string")
                        .build())
                    .mlflowExperimentName("string")
                    .source("string")
                    .trainingScriptPath("string")
                    .yamlParameters("string")
                    .yamlParametersFilePath("string")
                    .build())
                .health(JobTaskForEachTaskTaskHealthArgs.builder()
                    .rules(JobTaskForEachTaskTaskHealthRuleArgs.builder()
                        .metric("string")
                        .op("string")
                        .value(0)
                        .build())
                    .build())
                .jobClusterKey("string")
                .notificationSettings(JobTaskForEachTaskTaskNotificationSettingsArgs.builder()
                    .alertOnLastAttempt(false)
                    .noAlertForCanceledRuns(false)
                    .noAlertForSkippedRuns(false)
                    .build())
                .maxRetries(0)
                .cleanRoomsNotebookTask(JobTaskForEachTaskTaskCleanRoomsNotebookTaskArgs.builder()
                    .cleanRoomName("string")
                    .notebookName("string")
                    .etag("string")
                    .notebookBaseParameters(Map.of("string", "string"))
                    .build())
                .dependsOns(JobTaskForEachTaskTaskDependsOnArgs.builder()
                    .taskKey("string")
                    .outcome("string")
                    .build())
                .dbtTask(JobTaskForEachTaskTaskDbtTaskArgs.builder()
                    .commands("string")
                    .catalog("string")
                    .profilesDirectory("string")
                    .projectDirectory("string")
                    .schema("string")
                    .source("string")
                    .warehouseId("string")
                    .build())
                .libraries(JobTaskForEachTaskTaskLibraryArgs.builder()
                    .cran(JobTaskForEachTaskTaskLibraryCranArgs.builder()
                        .package_("string")
                        .repo("string")
                        .build())
                    .egg("string")
                    .jar("string")
                    .maven(JobTaskForEachTaskTaskLibraryMavenArgs.builder()
                        .coordinates("string")
                        .exclusions("string")
                        .repo("string")
                        .build())
                    .pypi(JobTaskForEachTaskTaskLibraryPypiArgs.builder()
                        .package_("string")
                        .repo("string")
                        .build())
                    .requirements("string")
                    .whl("string")
                    .build())
                .pipelineTask(JobTaskForEachTaskTaskPipelineTaskArgs.builder()
                    .pipelineId("string")
                    .fullRefresh(false)
                    .build())
                .pythonWheelTask(JobTaskForEachTaskTaskPythonWheelTaskArgs.builder()
                    .entryPoint("string")
                    .namedParameters(Map.of("string", "string"))
                    .packageName("string")
                    .parameters("string")
                    .build())
                .retryOnTimeout(false)
                .runIf("string")
                .runJobTask(JobTaskForEachTaskTaskRunJobTaskArgs.builder()
                    .jobId(0)
                    .dbtCommands("string")
                    .jarParams("string")
                    .jobParameters(Map.of("string", "string"))
                    .notebookParams(Map.of("string", "string"))
                    .pipelineParams(JobTaskForEachTaskTaskRunJobTaskPipelineParamsArgs.builder()
                        .fullRefresh(false)
                        .build())
                    .pythonNamedParams(Map.of("string", "string"))
                    .pythonParams("string")
                    .sparkSubmitParams("string")
                    .sqlParams(Map.of("string", "string"))
                    .build())
                .sparkJarTask(JobTaskForEachTaskTaskSparkJarTaskArgs.builder()
                    .jarUri("string")
                    .mainClassName("string")
                    .parameters("string")
                    .runAsRepl(false)
                    .build())
                .sparkPythonTask(JobTaskForEachTaskTaskSparkPythonTaskArgs.builder()
                    .pythonFile("string")
                    .parameters("string")
                    .source("string")
                    .build())
                .sparkSubmitTask(JobTaskForEachTaskTaskSparkSubmitTaskArgs.builder()
                    .parameters("string")
                    .build())
                .sqlTask(JobTaskForEachTaskTaskSqlTaskArgs.builder()
                    .warehouseId("string")
                    .alert(JobTaskForEachTaskTaskSqlTaskAlertArgs.builder()
                        .alertId("string")
                        .pauseSubscriptions(false)
                        .subscriptions(JobTaskForEachTaskTaskSqlTaskAlertSubscriptionArgs.builder()
                            .destinationId("string")
                            .userName("string")
                            .build())
                        .build())
                    .dashboard(JobTaskForEachTaskTaskSqlTaskDashboardArgs.builder()
                        .dashboardId("string")
                        .customSubject("string")
                        .pauseSubscriptions(false)
                        .subscriptions(JobTaskForEachTaskTaskSqlTaskDashboardSubscriptionArgs.builder()
                            .destinationId("string")
                            .userName("string")
                            .build())
                        .build())
                    .file(JobTaskForEachTaskTaskSqlTaskFileArgs.builder()
                        .path("string")
                        .source("string")
                        .build())
                    .parameters(Map.of("string", "string"))
                    .query(JobTaskForEachTaskTaskSqlTaskQueryArgs.builder()
                        .queryId("string")
                        .build())
                    .build())
                .conditionTask(JobTaskForEachTaskTaskConditionTaskArgs.builder()
                    .left("string")
                    .op("string")
                    .right("string")
                    .build())
                .timeoutSeconds(0)
                .webhookNotifications(JobTaskForEachTaskTaskWebhookNotificationsArgs.builder()
                    .onDurationWarningThresholdExceededs(JobTaskForEachTaskTaskWebhookNotificationsOnDurationWarningThresholdExceededArgs.builder()
                        .id("string")
                        .build())
                    .onFailures(JobTaskForEachTaskTaskWebhookNotificationsOnFailureArgs.builder()
                        .id("string")
                        .build())
                    .onStarts(JobTaskForEachTaskTaskWebhookNotificationsOnStartArgs.builder()
                        .id("string")
                        .build())
                    .onStreamingBacklogExceededs(JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs.builder()
                        .id("string")
                        .build())
                    .onSuccesses(JobTaskForEachTaskTaskWebhookNotificationsOnSuccessArgs.builder()
                        .id("string")
                        .build())
                    .build())
                .build())
            .concurrency(0)
            .build())
        .genAiComputeTask(JobTaskGenAiComputeTaskArgs.builder()
            .dlRuntimeImage("string")
            .command("string")
            .compute(JobTaskGenAiComputeTaskComputeArgs.builder()
                .gpuNodePoolId("string")
                .numGpus(0)
                .gpuType("string")
                .build())
            .mlflowExperimentName("string")
            .source("string")
            .trainingScriptPath("string")
            .yamlParameters("string")
            .yamlParametersFilePath("string")
            .build())
        .health(JobTaskHealthArgs.builder()
            .rules(JobTaskHealthRuleArgs.builder()
                .metric("string")
                .op("string")
                .value(0)
                .build())
            .build())
        .jobClusterKey("string")
        .libraries(JobTaskLibraryArgs.builder()
            .cran(JobTaskLibraryCranArgs.builder()
                .package_("string")
                .repo("string")
                .build())
            .egg("string")
            .jar("string")
            .maven(JobTaskLibraryMavenArgs.builder()
                .coordinates("string")
                .exclusions("string")
                .repo("string")
                .build())
            .pypi(JobTaskLibraryPypiArgs.builder()
                .package_("string")
                .repo("string")
                .build())
            .requirements("string")
            .whl("string")
            .build())
        .cleanRoomsNotebookTask(JobTaskCleanRoomsNotebookTaskArgs.builder()
            .cleanRoomName("string")
            .notebookName("string")
            .etag("string")
            .notebookBaseParameters(Map.of("string", "string"))
            .build())
        .webhookNotifications(JobTaskWebhookNotificationsArgs.builder()
            .onDurationWarningThresholdExceededs(JobTaskWebhookNotificationsOnDurationWarningThresholdExceededArgs.builder()
                .id("string")
                .build())
            .onFailures(JobTaskWebhookNotificationsOnFailureArgs.builder()
                .id("string")
                .build())
            .onStarts(JobTaskWebhookNotificationsOnStartArgs.builder()
                .id("string")
                .build())
            .onStreamingBacklogExceededs(JobTaskWebhookNotificationsOnStreamingBacklogExceededArgs.builder()
                .id("string")
                .build())
            .onSuccesses(JobTaskWebhookNotificationsOnSuccessArgs.builder()
                .id("string")
                .build())
            .build())
        .dbtTask(JobTaskDbtTaskArgs.builder()
            .commands("string")
            .catalog("string")
            .profilesDirectory("string")
            .projectDirectory("string")
            .schema("string")
            .source("string")
            .warehouseId("string")
            .build())
        .notebookTask(JobTaskNotebookTaskArgs.builder()
            .notebookPath("string")
            .baseParameters(Map.of("string", "string"))
            .source("string")
            .warehouseId("string")
            .build())
        .notificationSettings(JobTaskNotificationSettingsArgs.builder()
            .alertOnLastAttempt(false)
            .noAlertForCanceledRuns(false)
            .noAlertForSkippedRuns(false)
            .build())
        .pipelineTask(JobTaskPipelineTaskArgs.builder()
            .pipelineId("string")
            .fullRefresh(false)
            .build())
        .pythonWheelTask(JobTaskPythonWheelTaskArgs.builder()
            .entryPoint("string")
            .namedParameters(Map.of("string", "string"))
            .packageName("string")
            .parameters("string")
            .build())
        .retryOnTimeout(false)
        .runIf("string")
        .runJobTask(JobTaskRunJobTaskArgs.builder()
            .jobId(0)
            .dbtCommands("string")
            .jarParams("string")
            .jobParameters(Map.of("string", "string"))
            .notebookParams(Map.of("string", "string"))
            .pipelineParams(JobTaskRunJobTaskPipelineParamsArgs.builder()
                .fullRefresh(false)
                .build())
            .pythonNamedParams(Map.of("string", "string"))
            .pythonParams("string")
            .sparkSubmitParams("string")
            .sqlParams(Map.of("string", "string"))
            .build())
        .sparkJarTask(JobTaskSparkJarTaskArgs.builder()
            .jarUri("string")
            .mainClassName("string")
            .parameters("string")
            .runAsRepl(false)
            .build())
        .sparkPythonTask(JobTaskSparkPythonTaskArgs.builder()
            .pythonFile("string")
            .parameters("string")
            .source("string")
            .build())
        .sparkSubmitTask(JobTaskSparkSubmitTaskArgs.builder()
            .parameters("string")
            .build())
        .sqlTask(JobTaskSqlTaskArgs.builder()
            .warehouseId("string")
            .alert(JobTaskSqlTaskAlertArgs.builder()
                .alertId("string")
                .pauseSubscriptions(false)
                .subscriptions(JobTaskSqlTaskAlertSubscriptionArgs.builder()
                    .destinationId("string")
                    .userName("string")
                    .build())
                .build())
            .dashboard(JobTaskSqlTaskDashboardArgs.builder()
                .dashboardId("string")
                .customSubject("string")
                .pauseSubscriptions(false)
                .subscriptions(JobTaskSqlTaskDashboardSubscriptionArgs.builder()
                    .destinationId("string")
                    .userName("string")
                    .build())
                .build())
            .file(JobTaskSqlTaskFileArgs.builder()
                .path("string")
                .source("string")
                .build())
            .parameters(Map.of("string", "string"))
            .query(JobTaskSqlTaskQueryArgs.builder()
                .queryId("string")
                .build())
            .build())
        .conditionTask(JobTaskConditionTaskArgs.builder()
            .left("string")
            .op("string")
            .right("string")
            .build())
        .timeoutSeconds(0)
        .minRetryIntervalMillis(0)
        .build())
    .timeoutSeconds(0)
    .trigger(JobTriggerArgs.builder()
        .fileArrival(JobTriggerFileArrivalArgs.builder()
            .url("string")
            .minTimeBetweenTriggersSeconds(0)
            .waitAfterLastChangeSeconds(0)
            .build())
        .pauseStatus("string")
        .periodic(JobTriggerPeriodicArgs.builder()
            .interval(0)
            .unit("string")
            .build())
        .table(JobTriggerTableArgs.builder()
            .condition("string")
            .minTimeBetweenTriggersSeconds(0)
            .tableNames("string")
            .waitAfterLastChangeSeconds(0)
            .build())
        .tableUpdate(JobTriggerTableUpdateArgs.builder()
            .tableNames("string")
            .condition("string")
            .minTimeBetweenTriggersSeconds(0)
            .waitAfterLastChangeSeconds(0)
            .build())
        .build())
    .webhookNotifications(JobWebhookNotificationsArgs.builder()
        .onDurationWarningThresholdExceededs(JobWebhookNotificationsOnDurationWarningThresholdExceededArgs.builder()
            .id("string")
            .build())
        .onFailures(JobWebhookNotificationsOnFailureArgs.builder()
            .id("string")
            .build())
        .onStarts(JobWebhookNotificationsOnStartArgs.builder()
            .id("string")
            .build())
        .onStreamingBacklogExceededs(JobWebhookNotificationsOnStreamingBacklogExceededArgs.builder()
            .id("string")
            .build())
        .onSuccesses(JobWebhookNotificationsOnSuccessArgs.builder()
            .id("string")
            .build())
        .build())
    .build());
import pulumi
import pulumi_databricks as databricks

job_resource = databricks.Job("jobResource",
    budget_policy_id="string",
    continuous={
        "pause_status": "string",
    },
    control_run_state=False,
    deployment={
        "kind": "string",
        "metadata_file_path": "string",
    },
    description="string",
    edit_mode="string",
    email_notifications={
        "no_alert_for_skipped_runs": False,
        "on_duration_warning_threshold_exceededs": ["string"],
        "on_failures": ["string"],
        "on_starts": ["string"],
        "on_streaming_backlog_exceededs": ["string"],
        "on_successes": ["string"],
    },
    environments=[{
        "environment_key": "string",
        "spec": {
            "client": "string",
            "dependencies": ["string"],
        },
    }],
    existing_cluster_id="string",
    format="string",
    git_source={
        "url": "string",
        "branch": "string",
        "commit": "string",
        "git_snapshot": {
            "used_commit": "string",
        },
        "job_source": {
            "import_from_git_branch": "string",
            "job_config_path": "string",
            "dirty_state": "string",
        },
        "provider": "string",
        "tag": "string",
    },
    health={
        "rules": [{
            "metric": "string",
            "op": "string",
            "value": 0,
        }],
    },
    job_clusters=[{
        "job_cluster_key": "string",
        "new_cluster": {
            "spark_version": "string",
            "idempotency_token": "string",
            "ssh_public_keys": ["string"],
            "azure_attributes": {
                "availability": "string",
                "first_on_demand": 0,
                "log_analytics_info": {
                    "log_analytics_primary_key": "string",
                    "log_analytics_workspace_id": "string",
                },
                "spot_bid_max_price": 0,
            },
            "cluster_id": "string",
            "cluster_log_conf": {
                "dbfs": {
                    "destination": "string",
                },
                "s3": {
                    "destination": "string",
                    "canned_acl": "string",
                    "enable_encryption": False,
                    "encryption_type": "string",
                    "endpoint": "string",
                    "kms_key": "string",
                    "region": "string",
                },
                "volumes": {
                    "destination": "string",
                },
            },
            "cluster_mount_infos": [{
                "local_mount_dir_path": "string",
                "network_filesystem_info": {
                    "server_address": "string",
                    "mount_options": "string",
                },
                "remote_mount_dir_path": "string",
            }],
            "init_scripts": [{
                "abfss": {
                    "destination": "string",
                },
                "file": {
                    "destination": "string",
                },
                "gcs": {
                    "destination": "string",
                },
                "s3": {
                    "destination": "string",
                    "canned_acl": "string",
                    "enable_encryption": False,
                    "encryption_type": "string",
                    "endpoint": "string",
                    "kms_key": "string",
                    "region": "string",
                },
                "volumes": {
                    "destination": "string",
                },
                "workspace": {
                    "destination": "string",
                },
            }],
            "custom_tags": {
                "string": "string",
            },
            "data_security_mode": "string",
            "docker_image": {
                "url": "string",
                "basic_auth": {
                    "password": "string",
                    "username": "string",
                },
            },
            "driver_instance_pool_id": "string",
            "driver_node_type_id": "string",
            "enable_elastic_disk": False,
            "enable_local_disk_encryption": False,
            "workload_type": {
                "clients": {
                    "jobs": False,
                    "notebooks": False,
                },
            },
            "aws_attributes": {
                "availability": "string",
                "ebs_volume_count": 0,
                "ebs_volume_iops": 0,
                "ebs_volume_size": 0,
                "ebs_volume_throughput": 0,
                "ebs_volume_type": "string",
                "first_on_demand": 0,
                "instance_profile_arn": "string",
                "spot_bid_price_percent": 0,
                "zone_id": "string",
            },
            "cluster_name": "string",
            "instance_pool_id": "string",
            "is_single_node": False,
            "kind": "string",
            "libraries": [{
                "cran": {
                    "package": "string",
                    "repo": "string",
                },
                "egg": "string",
                "jar": "string",
                "maven": {
                    "coordinates": "string",
                    "exclusions": ["string"],
                    "repo": "string",
                },
                "pypi": {
                    "package": "string",
                    "repo": "string",
                },
                "requirements": "string",
                "whl": "string",
            }],
            "node_type_id": "string",
            "num_workers": 0,
            "policy_id": "string",
            "runtime_engine": "string",
            "single_user_name": "string",
            "spark_conf": {
                "string": "string",
            },
            "spark_env_vars": {
                "string": "string",
            },
            "autoscale": {
                "max_workers": 0,
                "min_workers": 0,
            },
            "apply_policy_default_values": False,
            "use_ml_runtime": False,
            "gcp_attributes": {
                "availability": "string",
                "boot_disk_size": 0,
                "google_service_account": "string",
                "local_ssd_count": 0,
                "use_preemptible_executors": False,
                "zone_id": "string",
            },
        },
    }],
    libraries=[{
        "cran": {
            "package": "string",
            "repo": "string",
        },
        "egg": "string",
        "jar": "string",
        "maven": {
            "coordinates": "string",
            "exclusions": ["string"],
            "repo": "string",
        },
        "pypi": {
            "package": "string",
            "repo": "string",
        },
        "requirements": "string",
        "whl": "string",
    }],
    max_concurrent_runs=0,
    name="string",
    new_cluster={
        "spark_version": "string",
        "idempotency_token": "string",
        "ssh_public_keys": ["string"],
        "azure_attributes": {
            "availability": "string",
            "first_on_demand": 0,
            "log_analytics_info": {
                "log_analytics_primary_key": "string",
                "log_analytics_workspace_id": "string",
            },
            "spot_bid_max_price": 0,
        },
        "cluster_id": "string",
        "cluster_log_conf": {
            "dbfs": {
                "destination": "string",
            },
            "s3": {
                "destination": "string",
                "canned_acl": "string",
                "enable_encryption": False,
                "encryption_type": "string",
                "endpoint": "string",
                "kms_key": "string",
                "region": "string",
            },
            "volumes": {
                "destination": "string",
            },
        },
        "cluster_mount_infos": [{
            "local_mount_dir_path": "string",
            "network_filesystem_info": {
                "server_address": "string",
                "mount_options": "string",
            },
            "remote_mount_dir_path": "string",
        }],
        "init_scripts": [{
            "abfss": {
                "destination": "string",
            },
            "file": {
                "destination": "string",
            },
            "gcs": {
                "destination": "string",
            },
            "s3": {
                "destination": "string",
                "canned_acl": "string",
                "enable_encryption": False,
                "encryption_type": "string",
                "endpoint": "string",
                "kms_key": "string",
                "region": "string",
            },
            "volumes": {
                "destination": "string",
            },
            "workspace": {
                "destination": "string",
            },
        }],
        "custom_tags": {
            "string": "string",
        },
        "data_security_mode": "string",
        "docker_image": {
            "url": "string",
            "basic_auth": {
                "password": "string",
                "username": "string",
            },
        },
        "driver_instance_pool_id": "string",
        "driver_node_type_id": "string",
        "enable_elastic_disk": False,
        "enable_local_disk_encryption": False,
        "workload_type": {
            "clients": {
                "jobs": False,
                "notebooks": False,
            },
        },
        "aws_attributes": {
            "availability": "string",
            "ebs_volume_count": 0,
            "ebs_volume_iops": 0,
            "ebs_volume_size": 0,
            "ebs_volume_throughput": 0,
            "ebs_volume_type": "string",
            "first_on_demand": 0,
            "instance_profile_arn": "string",
            "spot_bid_price_percent": 0,
            "zone_id": "string",
        },
        "cluster_name": "string",
        "instance_pool_id": "string",
        "is_single_node": False,
        "kind": "string",
        "libraries": [{
            "cran": {
                "package": "string",
                "repo": "string",
            },
            "egg": "string",
            "jar": "string",
            "maven": {
                "coordinates": "string",
                "exclusions": ["string"],
                "repo": "string",
            },
            "pypi": {
                "package": "string",
                "repo": "string",
            },
            "requirements": "string",
            "whl": "string",
        }],
        "node_type_id": "string",
        "num_workers": 0,
        "policy_id": "string",
        "runtime_engine": "string",
        "single_user_name": "string",
        "spark_conf": {
            "string": "string",
        },
        "spark_env_vars": {
            "string": "string",
        },
        "autoscale": {
            "max_workers": 0,
            "min_workers": 0,
        },
        "apply_policy_default_values": False,
        "use_ml_runtime": False,
        "gcp_attributes": {
            "availability": "string",
            "boot_disk_size": 0,
            "google_service_account": "string",
            "local_ssd_count": 0,
            "use_preemptible_executors": False,
            "zone_id": "string",
        },
    },
    notification_settings={
        "no_alert_for_canceled_runs": False,
        "no_alert_for_skipped_runs": False,
    },
    parameters=[{
        "default": "string",
        "name": "string",
    }],
    performance_target="string",
    queue={
        "enabled": False,
    },
    run_as={
        "service_principal_name": "string",
        "user_name": "string",
    },
    schedule={
        "quartz_cron_expression": "string",
        "timezone_id": "string",
        "pause_status": "string",
    },
    tags={
        "string": "string",
    },
    tasks=[{
        "task_key": "string",
        "max_retries": 0,
        "depends_ons": [{
            "task_key": "string",
            "outcome": "string",
        }],
        "new_cluster": {
            "spark_version": "string",
            "idempotency_token": "string",
            "ssh_public_keys": ["string"],
            "azure_attributes": {
                "availability": "string",
                "first_on_demand": 0,
                "log_analytics_info": {
                    "log_analytics_primary_key": "string",
                    "log_analytics_workspace_id": "string",
                },
                "spot_bid_max_price": 0,
            },
            "cluster_id": "string",
            "cluster_log_conf": {
                "dbfs": {
                    "destination": "string",
                },
                "s3": {
                    "destination": "string",
                    "canned_acl": "string",
                    "enable_encryption": False,
                    "encryption_type": "string",
                    "endpoint": "string",
                    "kms_key": "string",
                    "region": "string",
                },
                "volumes": {
                    "destination": "string",
                },
            },
            "cluster_mount_infos": [{
                "local_mount_dir_path": "string",
                "network_filesystem_info": {
                    "server_address": "string",
                    "mount_options": "string",
                },
                "remote_mount_dir_path": "string",
            }],
            "init_scripts": [{
                "abfss": {
                    "destination": "string",
                },
                "file": {
                    "destination": "string",
                },
                "gcs": {
                    "destination": "string",
                },
                "s3": {
                    "destination": "string",
                    "canned_acl": "string",
                    "enable_encryption": False,
                    "encryption_type": "string",
                    "endpoint": "string",
                    "kms_key": "string",
                    "region": "string",
                },
                "volumes": {
                    "destination": "string",
                },
                "workspace": {
                    "destination": "string",
                },
            }],
            "custom_tags": {
                "string": "string",
            },
            "data_security_mode": "string",
            "docker_image": {
                "url": "string",
                "basic_auth": {
                    "password": "string",
                    "username": "string",
                },
            },
            "driver_instance_pool_id": "string",
            "driver_node_type_id": "string",
            "enable_elastic_disk": False,
            "enable_local_disk_encryption": False,
            "workload_type": {
                "clients": {
                    "jobs": False,
                    "notebooks": False,
                },
            },
            "aws_attributes": {
                "availability": "string",
                "ebs_volume_count": 0,
                "ebs_volume_iops": 0,
                "ebs_volume_size": 0,
                "ebs_volume_throughput": 0,
                "ebs_volume_type": "string",
                "first_on_demand": 0,
                "instance_profile_arn": "string",
                "spot_bid_price_percent": 0,
                "zone_id": "string",
            },
            "cluster_name": "string",
            "instance_pool_id": "string",
            "is_single_node": False,
            "kind": "string",
            "libraries": [{
                "cran": {
                    "package": "string",
                    "repo": "string",
                },
                "egg": "string",
                "jar": "string",
                "maven": {
                    "coordinates": "string",
                    "exclusions": ["string"],
                    "repo": "string",
                },
                "pypi": {
                    "package": "string",
                    "repo": "string",
                },
                "requirements": "string",
                "whl": "string",
            }],
            "node_type_id": "string",
            "num_workers": 0,
            "policy_id": "string",
            "runtime_engine": "string",
            "single_user_name": "string",
            "spark_conf": {
                "string": "string",
            },
            "spark_env_vars": {
                "string": "string",
            },
            "autoscale": {
                "max_workers": 0,
                "min_workers": 0,
            },
            "apply_policy_default_values": False,
            "use_ml_runtime": False,
            "gcp_attributes": {
                "availability": "string",
                "boot_disk_size": 0,
                "google_service_account": "string",
                "local_ssd_count": 0,
                "use_preemptible_executors": False,
                "zone_id": "string",
            },
        },
        "description": "string",
        "disable_auto_optimization": False,
        "email_notifications": {
            "no_alert_for_skipped_runs": False,
            "on_duration_warning_threshold_exceededs": ["string"],
            "on_failures": ["string"],
            "on_starts": ["string"],
            "on_streaming_backlog_exceededs": ["string"],
            "on_successes": ["string"],
        },
        "environment_key": "string",
        "existing_cluster_id": "string",
        "for_each_task": {
            "inputs": "string",
            "task": {
                "task_key": "string",
                "min_retry_interval_millis": 0,
                "disable_auto_optimization": False,
                "new_cluster": {
                    "spark_version": "string",
                    "idempotency_token": "string",
                    "ssh_public_keys": ["string"],
                    "azure_attributes": {
                        "availability": "string",
                        "first_on_demand": 0,
                        "log_analytics_info": {
                            "log_analytics_primary_key": "string",
                            "log_analytics_workspace_id": "string",
                        },
                        "spot_bid_max_price": 0,
                    },
                    "cluster_id": "string",
                    "cluster_log_conf": {
                        "dbfs": {
                            "destination": "string",
                        },
                        "s3": {
                            "destination": "string",
                            "canned_acl": "string",
                            "enable_encryption": False,
                            "encryption_type": "string",
                            "endpoint": "string",
                            "kms_key": "string",
                            "region": "string",
                        },
                        "volumes": {
                            "destination": "string",
                        },
                    },
                    "cluster_mount_infos": [{
                        "local_mount_dir_path": "string",
                        "network_filesystem_info": {
                            "server_address": "string",
                            "mount_options": "string",
                        },
                        "remote_mount_dir_path": "string",
                    }],
                    "init_scripts": [{
                        "abfss": {
                            "destination": "string",
                        },
                        "file": {
                            "destination": "string",
                        },
                        "gcs": {
                            "destination": "string",
                        },
                        "s3": {
                            "destination": "string",
                            "canned_acl": "string",
                            "enable_encryption": False,
                            "encryption_type": "string",
                            "endpoint": "string",
                            "kms_key": "string",
                            "region": "string",
                        },
                        "volumes": {
                            "destination": "string",
                        },
                        "workspace": {
                            "destination": "string",
                        },
                    }],
                    "custom_tags": {
                        "string": "string",
                    },
                    "data_security_mode": "string",
                    "docker_image": {
                        "url": "string",
                        "basic_auth": {
                            "password": "string",
                            "username": "string",
                        },
                    },
                    "driver_instance_pool_id": "string",
                    "driver_node_type_id": "string",
                    "enable_elastic_disk": False,
                    "enable_local_disk_encryption": False,
                    "workload_type": {
                        "clients": {
                            "jobs": False,
                            "notebooks": False,
                        },
                    },
                    "aws_attributes": {
                        "availability": "string",
                        "ebs_volume_count": 0,
                        "ebs_volume_iops": 0,
                        "ebs_volume_size": 0,
                        "ebs_volume_throughput": 0,
                        "ebs_volume_type": "string",
                        "first_on_demand": 0,
                        "instance_profile_arn": "string",
                        "spot_bid_price_percent": 0,
                        "zone_id": "string",
                    },
                    "cluster_name": "string",
                    "instance_pool_id": "string",
                    "is_single_node": False,
                    "kind": "string",
                    "libraries": [{
                        "cran": {
                            "package": "string",
                            "repo": "string",
                        },
                        "egg": "string",
                        "jar": "string",
                        "maven": {
                            "coordinates": "string",
                            "exclusions": ["string"],
                            "repo": "string",
                        },
                        "pypi": {
                            "package": "string",
                            "repo": "string",
                        },
                        "requirements": "string",
                        "whl": "string",
                    }],
                    "node_type_id": "string",
                    "num_workers": 0,
                    "policy_id": "string",
                    "runtime_engine": "string",
                    "single_user_name": "string",
                    "spark_conf": {
                        "string": "string",
                    },
                    "spark_env_vars": {
                        "string": "string",
                    },
                    "autoscale": {
                        "max_workers": 0,
                        "min_workers": 0,
                    },
                    "apply_policy_default_values": False,
                    "use_ml_runtime": False,
                    "gcp_attributes": {
                        "availability": "string",
                        "boot_disk_size": 0,
                        "google_service_account": "string",
                        "local_ssd_count": 0,
                        "use_preemptible_executors": False,
                        "zone_id": "string",
                    },
                },
                "description": "string",
                "notebook_task": {
                    "notebook_path": "string",
                    "base_parameters": {
                        "string": "string",
                    },
                    "source": "string",
                    "warehouse_id": "string",
                },
                "email_notifications": {
                    "no_alert_for_skipped_runs": False,
                    "on_duration_warning_threshold_exceededs": ["string"],
                    "on_failures": ["string"],
                    "on_starts": ["string"],
                    "on_streaming_backlog_exceededs": ["string"],
                    "on_successes": ["string"],
                },
                "environment_key": "string",
                "existing_cluster_id": "string",
                "gen_ai_compute_task": {
                    "dl_runtime_image": "string",
                    "command": "string",
                    "compute": {
                        "gpu_node_pool_id": "string",
                        "num_gpus": 0,
                        "gpu_type": "string",
                    },
                    "mlflow_experiment_name": "string",
                    "source": "string",
                    "training_script_path": "string",
                    "yaml_parameters": "string",
                    "yaml_parameters_file_path": "string",
                },
                "health": {
                    "rules": [{
                        "metric": "string",
                        "op": "string",
                        "value": 0,
                    }],
                },
                "job_cluster_key": "string",
                "notification_settings": {
                    "alert_on_last_attempt": False,
                    "no_alert_for_canceled_runs": False,
                    "no_alert_for_skipped_runs": False,
                },
                "max_retries": 0,
                "clean_rooms_notebook_task": {
                    "clean_room_name": "string",
                    "notebook_name": "string",
                    "etag": "string",
                    "notebook_base_parameters": {
                        "string": "string",
                    },
                },
                "depends_ons": [{
                    "task_key": "string",
                    "outcome": "string",
                }],
                "dbt_task": {
                    "commands": ["string"],
                    "catalog": "string",
                    "profiles_directory": "string",
                    "project_directory": "string",
                    "schema": "string",
                    "source": "string",
                    "warehouse_id": "string",
                },
                "libraries": [{
                    "cran": {
                        "package": "string",
                        "repo": "string",
                    },
                    "egg": "string",
                    "jar": "string",
                    "maven": {
                        "coordinates": "string",
                        "exclusions": ["string"],
                        "repo": "string",
                    },
                    "pypi": {
                        "package": "string",
                        "repo": "string",
                    },
                    "requirements": "string",
                    "whl": "string",
                }],
                "pipeline_task": {
                    "pipeline_id": "string",
                    "full_refresh": False,
                },
                "python_wheel_task": {
                    "entry_point": "string",
                    "named_parameters": {
                        "string": "string",
                    },
                    "package_name": "string",
                    "parameters": ["string"],
                },
                "retry_on_timeout": False,
                "run_if": "string",
                "run_job_task": {
                    "job_id": 0,
                    "dbt_commands": ["string"],
                    "jar_params": ["string"],
                    "job_parameters": {
                        "string": "string",
                    },
                    "notebook_params": {
                        "string": "string",
                    },
                    "pipeline_params": {
                        "full_refresh": False,
                    },
                    "python_named_params": {
                        "string": "string",
                    },
                    "python_params": ["string"],
                    "spark_submit_params": ["string"],
                    "sql_params": {
                        "string": "string",
                    },
                },
                "spark_jar_task": {
                    "jar_uri": "string",
                    "main_class_name": "string",
                    "parameters": ["string"],
                    "run_as_repl": False,
                },
                "spark_python_task": {
                    "python_file": "string",
                    "parameters": ["string"],
                    "source": "string",
                },
                "spark_submit_task": {
                    "parameters": ["string"],
                },
                "sql_task": {
                    "warehouse_id": "string",
                    "alert": {
                        "alert_id": "string",
                        "pause_subscriptions": False,
                        "subscriptions": [{
                            "destination_id": "string",
                            "user_name": "string",
                        }],
                    },
                    "dashboard": {
                        "dashboard_id": "string",
                        "custom_subject": "string",
                        "pause_subscriptions": False,
                        "subscriptions": [{
                            "destination_id": "string",
                            "user_name": "string",
                        }],
                    },
                    "file": {
                        "path": "string",
                        "source": "string",
                    },
                    "parameters": {
                        "string": "string",
                    },
                    "query": {
                        "query_id": "string",
                    },
                },
                "condition_task": {
                    "left": "string",
                    "op": "string",
                    "right": "string",
                },
                "timeout_seconds": 0,
                "webhook_notifications": {
                    "on_duration_warning_threshold_exceededs": [{
                        "id": "string",
                    }],
                    "on_failures": [{
                        "id": "string",
                    }],
                    "on_starts": [{
                        "id": "string",
                    }],
                    "on_streaming_backlog_exceededs": [{
                        "id": "string",
                    }],
                    "on_successes": [{
                        "id": "string",
                    }],
                },
            },
            "concurrency": 0,
        },
        "gen_ai_compute_task": {
            "dl_runtime_image": "string",
            "command": "string",
            "compute": {
                "gpu_node_pool_id": "string",
                "num_gpus": 0,
                "gpu_type": "string",
            },
            "mlflow_experiment_name": "string",
            "source": "string",
            "training_script_path": "string",
            "yaml_parameters": "string",
            "yaml_parameters_file_path": "string",
        },
        "health": {
            "rules": [{
                "metric": "string",
                "op": "string",
                "value": 0,
            }],
        },
        "job_cluster_key": "string",
        "libraries": [{
            "cran": {
                "package": "string",
                "repo": "string",
            },
            "egg": "string",
            "jar": "string",
            "maven": {
                "coordinates": "string",
                "exclusions": ["string"],
                "repo": "string",
            },
            "pypi": {
                "package": "string",
                "repo": "string",
            },
            "requirements": "string",
            "whl": "string",
        }],
        "clean_rooms_notebook_task": {
            "clean_room_name": "string",
            "notebook_name": "string",
            "etag": "string",
            "notebook_base_parameters": {
                "string": "string",
            },
        },
        "webhook_notifications": {
            "on_duration_warning_threshold_exceededs": [{
                "id": "string",
            }],
            "on_failures": [{
                "id": "string",
            }],
            "on_starts": [{
                "id": "string",
            }],
            "on_streaming_backlog_exceededs": [{
                "id": "string",
            }],
            "on_successes": [{
                "id": "string",
            }],
        },
        "dbt_task": {
            "commands": ["string"],
            "catalog": "string",
            "profiles_directory": "string",
            "project_directory": "string",
            "schema": "string",
            "source": "string",
            "warehouse_id": "string",
        },
        "notebook_task": {
            "notebook_path": "string",
            "base_parameters": {
                "string": "string",
            },
            "source": "string",
            "warehouse_id": "string",
        },
        "notification_settings": {
            "alert_on_last_attempt": False,
            "no_alert_for_canceled_runs": False,
            "no_alert_for_skipped_runs": False,
        },
        "pipeline_task": {
            "pipeline_id": "string",
            "full_refresh": False,
        },
        "python_wheel_task": {
            "entry_point": "string",
            "named_parameters": {
                "string": "string",
            },
            "package_name": "string",
            "parameters": ["string"],
        },
        "retry_on_timeout": False,
        "run_if": "string",
        "run_job_task": {
            "job_id": 0,
            "dbt_commands": ["string"],
            "jar_params": ["string"],
            "job_parameters": {
                "string": "string",
            },
            "notebook_params": {
                "string": "string",
            },
            "pipeline_params": {
                "full_refresh": False,
            },
            "python_named_params": {
                "string": "string",
            },
            "python_params": ["string"],
            "spark_submit_params": ["string"],
            "sql_params": {
                "string": "string",
            },
        },
        "spark_jar_task": {
            "jar_uri": "string",
            "main_class_name": "string",
            "parameters": ["string"],
            "run_as_repl": False,
        },
        "spark_python_task": {
            "python_file": "string",
            "parameters": ["string"],
            "source": "string",
        },
        "spark_submit_task": {
            "parameters": ["string"],
        },
        "sql_task": {
            "warehouse_id": "string",
            "alert": {
                "alert_id": "string",
                "pause_subscriptions": False,
                "subscriptions": [{
                    "destination_id": "string",
                    "user_name": "string",
                }],
            },
            "dashboard": {
                "dashboard_id": "string",
                "custom_subject": "string",
                "pause_subscriptions": False,
                "subscriptions": [{
                    "destination_id": "string",
                    "user_name": "string",
                }],
            },
            "file": {
                "path": "string",
                "source": "string",
            },
            "parameters": {
                "string": "string",
            },
            "query": {
                "query_id": "string",
            },
        },
        "condition_task": {
            "left": "string",
            "op": "string",
            "right": "string",
        },
        "timeout_seconds": 0,
        "min_retry_interval_millis": 0,
    }],
    timeout_seconds=0,
    trigger={
        "file_arrival": {
            "url": "string",
            "min_time_between_triggers_seconds": 0,
            "wait_after_last_change_seconds": 0,
        },
        "pause_status": "string",
        "periodic": {
            "interval": 0,
            "unit": "string",
        },
        "table": {
            "condition": "string",
            "min_time_between_triggers_seconds": 0,
            "table_names": ["string"],
            "wait_after_last_change_seconds": 0,
        },
        "table_update": {
            "table_names": ["string"],
            "condition": "string",
            "min_time_between_triggers_seconds": 0,
            "wait_after_last_change_seconds": 0,
        },
    },
    webhook_notifications={
        "on_duration_warning_threshold_exceededs": [{
            "id": "string",
        }],
        "on_failures": [{
            "id": "string",
        }],
        "on_starts": [{
            "id": "string",
        }],
        "on_streaming_backlog_exceededs": [{
            "id": "string",
        }],
        "on_successes": [{
            "id": "string",
        }],
    })
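For orientation, the following is a minimal, hand-written sketch of a single-task job in Python that wires together a few of the arguments listed above: a cron schedule, one sql_task, and failure e-mail notifications. The warehouse ID, query ID, and e-mail address are hypothetical placeholders rather than values taken from this page; substitute references to real resources in your own program.

import pulumi
import pulumi_databricks as databricks

nightly_report = databricks.Job("nightlyReport",
    name="Nightly SQL report",
    schedule={
        # Quartz cron: run at 02:00 UTC every day.
        "quartz_cron_expression": "0 0 2 * * ?",
        "timezone_id": "UTC",
        "pause_status": "UNPAUSED",
    },
    tasks=[{
        "task_key": "run_report",
        "sql_task": {
            "warehouse_id": "<warehouse-id>",  # hypothetical placeholder
            "query": {
                "query_id": "<query-id>",      # hypothetical placeholder
            },
        },
    }],
    email_notifications={
        "on_failures": ["ops@example.com"],  # illustrative address
    })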
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

const jobResource = new databricks.Job("jobResource", {
    budgetPolicyId: "string",
    continuous: {
        pauseStatus: "string",
    },
    controlRunState: false,
    deployment: {
        kind: "string",
        metadataFilePath: "string",
    },
    description: "string",
    editMode: "string",
    emailNotifications: {
        noAlertForSkippedRuns: false,
        onDurationWarningThresholdExceededs: ["string"],
        onFailures: ["string"],
        onStarts: ["string"],
        onStreamingBacklogExceededs: ["string"],
        onSuccesses: ["string"],
    },
    environments: [{
        environmentKey: "string",
        spec: {
            client: "string",
            dependencies: ["string"],
        },
    }],
    existingClusterId: "string",
    format: "string",
    gitSource: {
        url: "string",
        branch: "string",
        commit: "string",
        gitSnapshot: {
            usedCommit: "string",
        },
        jobSource: {
            importFromGitBranch: "string",
            jobConfigPath: "string",
            dirtyState: "string",
        },
        provider: "string",
        tag: "string",
    },
    health: {
        rules: [{
            metric: "string",
            op: "string",
            value: 0,
        }],
    },
    jobClusters: [{
        jobClusterKey: "string",
        newCluster: {
            sparkVersion: "string",
            idempotencyToken: "string",
            sshPublicKeys: ["string"],
            azureAttributes: {
                availability: "string",
                firstOnDemand: 0,
                logAnalyticsInfo: {
                    logAnalyticsPrimaryKey: "string",
                    logAnalyticsWorkspaceId: "string",
                },
                spotBidMaxPrice: 0,
            },
            clusterId: "string",
            clusterLogConf: {
                dbfs: {
                    destination: "string",
                },
                s3: {
                    destination: "string",
                    cannedAcl: "string",
                    enableEncryption: false,
                    encryptionType: "string",
                    endpoint: "string",
                    kmsKey: "string",
                    region: "string",
                },
                volumes: {
                    destination: "string",
                },
            },
            clusterMountInfos: [{
                localMountDirPath: "string",
                networkFilesystemInfo: {
                    serverAddress: "string",
                    mountOptions: "string",
                },
                remoteMountDirPath: "string",
            }],
            initScripts: [{
                abfss: {
                    destination: "string",
                },
                file: {
                    destination: "string",
                },
                gcs: {
                    destination: "string",
                },
                s3: {
                    destination: "string",
                    cannedAcl: "string",
                    enableEncryption: false,
                    encryptionType: "string",
                    endpoint: "string",
                    kmsKey: "string",
                    region: "string",
                },
                volumes: {
                    destination: "string",
                },
                workspace: {
                    destination: "string",
                },
            }],
            customTags: {
                string: "string",
            },
            dataSecurityMode: "string",
            dockerImage: {
                url: "string",
                basicAuth: {
                    password: "string",
                    username: "string",
                },
            },
            driverInstancePoolId: "string",
            driverNodeTypeId: "string",
            enableElasticDisk: false,
            enableLocalDiskEncryption: false,
            workloadType: {
                clients: {
                    jobs: false,
                    notebooks: false,
                },
            },
            awsAttributes: {
                availability: "string",
                ebsVolumeCount: 0,
                ebsVolumeIops: 0,
                ebsVolumeSize: 0,
                ebsVolumeThroughput: 0,
                ebsVolumeType: "string",
                firstOnDemand: 0,
                instanceProfileArn: "string",
                spotBidPricePercent: 0,
                zoneId: "string",
            },
            clusterName: "string",
            instancePoolId: "string",
            isSingleNode: false,
            kind: "string",
            libraries: [{
                cran: {
                    "package": "string",
                    repo: "string",
                },
                egg: "string",
                jar: "string",
                maven: {
                    coordinates: "string",
                    exclusions: ["string"],
                    repo: "string",
                },
                pypi: {
                    "package": "string",
                    repo: "string",
                },
                requirements: "string",
                whl: "string",
            }],
            nodeTypeId: "string",
            numWorkers: 0,
            policyId: "string",
            runtimeEngine: "string",
            singleUserName: "string",
            sparkConf: {
                string: "string",
            },
            sparkEnvVars: {
                string: "string",
            },
            autoscale: {
                maxWorkers: 0,
                minWorkers: 0,
            },
            applyPolicyDefaultValues: false,
            useMlRuntime: false,
            gcpAttributes: {
                availability: "string",
                bootDiskSize: 0,
                googleServiceAccount: "string",
                localSsdCount: 0,
                usePreemptibleExecutors: false,
                zoneId: "string",
            },
        },
    }],
    libraries: [{
        cran: {
            "package": "string",
            repo: "string",
        },
        egg: "string",
        jar: "string",
        maven: {
            coordinates: "string",
            exclusions: ["string"],
            repo: "string",
        },
        pypi: {
            "package": "string",
            repo: "string",
        },
        requirements: "string",
        whl: "string",
    }],
    maxConcurrentRuns: 0,
    name: "string",
    newCluster: {
        sparkVersion: "string",
        idempotencyToken: "string",
        sshPublicKeys: ["string"],
        azureAttributes: {
            availability: "string",
            firstOnDemand: 0,
            logAnalyticsInfo: {
                logAnalyticsPrimaryKey: "string",
                logAnalyticsWorkspaceId: "string",
            },
            spotBidMaxPrice: 0,
        },
        clusterId: "string",
        clusterLogConf: {
            dbfs: {
                destination: "string",
            },
            s3: {
                destination: "string",
                cannedAcl: "string",
                enableEncryption: false,
                encryptionType: "string",
                endpoint: "string",
                kmsKey: "string",
                region: "string",
            },
            volumes: {
                destination: "string",
            },
        },
        clusterMountInfos: [{
            localMountDirPath: "string",
            networkFilesystemInfo: {
                serverAddress: "string",
                mountOptions: "string",
            },
            remoteMountDirPath: "string",
        }],
        initScripts: [{
            abfss: {
                destination: "string",
            },
            file: {
                destination: "string",
            },
            gcs: {
                destination: "string",
            },
            s3: {
                destination: "string",
                cannedAcl: "string",
                enableEncryption: false,
                encryptionType: "string",
                endpoint: "string",
                kmsKey: "string",
                region: "string",
            },
            volumes: {
                destination: "string",
            },
            workspace: {
                destination: "string",
            },
        }],
        customTags: {
            string: "string",
        },
        dataSecurityMode: "string",
        dockerImage: {
            url: "string",
            basicAuth: {
                password: "string",
                username: "string",
            },
        },
        driverInstancePoolId: "string",
        driverNodeTypeId: "string",
        enableElasticDisk: false,
        enableLocalDiskEncryption: false,
        workloadType: {
            clients: {
                jobs: false,
                notebooks: false,
            },
        },
        awsAttributes: {
            availability: "string",
            ebsVolumeCount: 0,
            ebsVolumeIops: 0,
            ebsVolumeSize: 0,
            ebsVolumeThroughput: 0,
            ebsVolumeType: "string",
            firstOnDemand: 0,
            instanceProfileArn: "string",
            spotBidPricePercent: 0,
            zoneId: "string",
        },
        clusterName: "string",
        instancePoolId: "string",
        isSingleNode: false,
        kind: "string",
        libraries: [{
            cran: {
                "package": "string",
                repo: "string",
            },
            egg: "string",
            jar: "string",
            maven: {
                coordinates: "string",
                exclusions: ["string"],
                repo: "string",
            },
            pypi: {
                "package": "string",
                repo: "string",
            },
            requirements: "string",
            whl: "string",
        }],
        nodeTypeId: "string",
        numWorkers: 0,
        policyId: "string",
        runtimeEngine: "string",
        singleUserName: "string",
        sparkConf: {
            string: "string",
        },
        sparkEnvVars: {
            string: "string",
        },
        autoscale: {
            maxWorkers: 0,
            minWorkers: 0,
        },
        applyPolicyDefaultValues: false,
        useMlRuntime: false,
        gcpAttributes: {
            availability: "string",
            bootDiskSize: 0,
            googleServiceAccount: "string",
            localSsdCount: 0,
            usePreemptibleExecutors: false,
            zoneId: "string",
        },
    },
    notificationSettings: {
        noAlertForCanceledRuns: false,
        noAlertForSkippedRuns: false,
    },
    parameters: [{
        "default": "string",
        name: "string",
    }],
    performanceTarget: "string",
    queue: {
        enabled: false,
    },
    runAs: {
        servicePrincipalName: "string",
        userName: "string",
    },
    schedule: {
        quartzCronExpression: "string",
        timezoneId: "string",
        pauseStatus: "string",
    },
    tags: {
        string: "string",
    },
    tasks: [{
        taskKey: "string",
        maxRetries: 0,
        dependsOns: [{
            taskKey: "string",
            outcome: "string",
        }],
        newCluster: {
            sparkVersion: "string",
            idempotencyToken: "string",
            sshPublicKeys: ["string"],
            azureAttributes: {
                availability: "string",
                firstOnDemand: 0,
                logAnalyticsInfo: {
                    logAnalyticsPrimaryKey: "string",
                    logAnalyticsWorkspaceId: "string",
                },
                spotBidMaxPrice: 0,
            },
            clusterId: "string",
            clusterLogConf: {
                dbfs: {
                    destination: "string",
                },
                s3: {
                    destination: "string",
                    cannedAcl: "string",
                    enableEncryption: false,
                    encryptionType: "string",
                    endpoint: "string",
                    kmsKey: "string",
                    region: "string",
                },
                volumes: {
                    destination: "string",
                },
            },
            clusterMountInfos: [{
                localMountDirPath: "string",
                networkFilesystemInfo: {
                    serverAddress: "string",
                    mountOptions: "string",
                },
                remoteMountDirPath: "string",
            }],
            initScripts: [{
                abfss: {
                    destination: "string",
                },
                file: {
                    destination: "string",
                },
                gcs: {
                    destination: "string",
                },
                s3: {
                    destination: "string",
                    cannedAcl: "string",
                    enableEncryption: false,
                    encryptionType: "string",
                    endpoint: "string",
                    kmsKey: "string",
                    region: "string",
                },
                volumes: {
                    destination: "string",
                },
                workspace: {
                    destination: "string",
                },
            }],
            customTags: {
                string: "string",
            },
            dataSecurityMode: "string",
            dockerImage: {
                url: "string",
                basicAuth: {
                    password: "string",
                    username: "string",
                },
            },
            driverInstancePoolId: "string",
            driverNodeTypeId: "string",
            enableElasticDisk: false,
            enableLocalDiskEncryption: false,
            workloadType: {
                clients: {
                    jobs: false,
                    notebooks: false,
                },
            },
            awsAttributes: {
                availability: "string",
                ebsVolumeCount: 0,
                ebsVolumeIops: 0,
                ebsVolumeSize: 0,
                ebsVolumeThroughput: 0,
                ebsVolumeType: "string",
                firstOnDemand: 0,
                instanceProfileArn: "string",
                spotBidPricePercent: 0,
                zoneId: "string",
            },
            clusterName: "string",
            instancePoolId: "string",
            isSingleNode: false,
            kind: "string",
            libraries: [{
                cran: {
                    "package": "string",
                    repo: "string",
                },
                egg: "string",
                jar: "string",
                maven: {
                    coordinates: "string",
                    exclusions: ["string"],
                    repo: "string",
                },
                pypi: {
                    "package": "string",
                    repo: "string",
                },
                requirements: "string",
                whl: "string",
            }],
            nodeTypeId: "string",
            numWorkers: 0,
            policyId: "string",
            runtimeEngine: "string",
            singleUserName: "string",
            sparkConf: {
                string: "string",
            },
            sparkEnvVars: {
                string: "string",
            },
            autoscale: {
                maxWorkers: 0,
                minWorkers: 0,
            },
            applyPolicyDefaultValues: false,
            useMlRuntime: false,
            gcpAttributes: {
                availability: "string",
                bootDiskSize: 0,
                googleServiceAccount: "string",
                localSsdCount: 0,
                usePreemptibleExecutors: false,
                zoneId: "string",
            },
        },
        description: "string",
        disableAutoOptimization: false,
        emailNotifications: {
            noAlertForSkippedRuns: false,
            onDurationWarningThresholdExceededs: ["string"],
            onFailures: ["string"],
            onStarts: ["string"],
            onStreamingBacklogExceededs: ["string"],
            onSuccesses: ["string"],
        },
        environmentKey: "string",
        existingClusterId: "string",
        forEachTask: {
            inputs: "string",
            task: {
                taskKey: "string",
                minRetryIntervalMillis: 0,
                disableAutoOptimization: false,
                newCluster: {
                    sparkVersion: "string",
                    idempotencyToken: "string",
                    sshPublicKeys: ["string"],
                    azureAttributes: {
                        availability: "string",
                        firstOnDemand: 0,
                        logAnalyticsInfo: {
                            logAnalyticsPrimaryKey: "string",
                            logAnalyticsWorkspaceId: "string",
                        },
                        spotBidMaxPrice: 0,
                    },
                    clusterId: "string",
                    clusterLogConf: {
                        dbfs: {
                            destination: "string",
                        },
                        s3: {
                            destination: "string",
                            cannedAcl: "string",
                            enableEncryption: false,
                            encryptionType: "string",
                            endpoint: "string",
                            kmsKey: "string",
                            region: "string",
                        },
                        volumes: {
                            destination: "string",
                        },
                    },
                    clusterMountInfos: [{
                        localMountDirPath: "string",
                        networkFilesystemInfo: {
                            serverAddress: "string",
                            mountOptions: "string",
                        },
                        remoteMountDirPath: "string",
                    }],
                    initScripts: [{
                        abfss: {
                            destination: "string",
                        },
                        file: {
                            destination: "string",
                        },
                        gcs: {
                            destination: "string",
                        },
                        s3: {
                            destination: "string",
                            cannedAcl: "string",
                            enableEncryption: false,
                            encryptionType: "string",
                            endpoint: "string",
                            kmsKey: "string",
                            region: "string",
                        },
                        volumes: {
                            destination: "string",
                        },
                        workspace: {
                            destination: "string",
                        },
                    }],
                    customTags: {
                        string: "string",
                    },
                    dataSecurityMode: "string",
                    dockerImage: {
                        url: "string",
                        basicAuth: {
                            password: "string",
                            username: "string",
                        },
                    },
                    driverInstancePoolId: "string",
                    driverNodeTypeId: "string",
                    enableElasticDisk: false,
                    enableLocalDiskEncryption: false,
                    workloadType: {
                        clients: {
                            jobs: false,
                            notebooks: false,
                        },
                    },
                    awsAttributes: {
                        availability: "string",
                        ebsVolumeCount: 0,
                        ebsVolumeIops: 0,
                        ebsVolumeSize: 0,
                        ebsVolumeThroughput: 0,
                        ebsVolumeType: "string",
                        firstOnDemand: 0,
                        instanceProfileArn: "string",
                        spotBidPricePercent: 0,
                        zoneId: "string",
                    },
                    clusterName: "string",
                    instancePoolId: "string",
                    isSingleNode: false,
                    kind: "string",
                    libraries: [{
                        cran: {
                            "package": "string",
                            repo: "string",
                        },
                        egg: "string",
                        jar: "string",
                        maven: {
                            coordinates: "string",
                            exclusions: ["string"],
                            repo: "string",
                        },
                        pypi: {
                            "package": "string",
                            repo: "string",
                        },
                        requirements: "string",
                        whl: "string",
                    }],
                    nodeTypeId: "string",
                    numWorkers: 0,
                    policyId: "string",
                    runtimeEngine: "string",
                    singleUserName: "string",
                    sparkConf: {
                        string: "string",
                    },
                    sparkEnvVars: {
                        string: "string",
                    },
                    autoscale: {
                        maxWorkers: 0,
                        minWorkers: 0,
                    },
                    applyPolicyDefaultValues: false,
                    useMlRuntime: false,
                    gcpAttributes: {
                        availability: "string",
                        bootDiskSize: 0,
                        googleServiceAccount: "string",
                        localSsdCount: 0,
                        usePreemptibleExecutors: false,
                        zoneId: "string",
                    },
                },
                description: "string",
                notebookTask: {
                    notebookPath: "string",
                    baseParameters: {
                        string: "string",
                    },
                    source: "string",
                    warehouseId: "string",
                },
                emailNotifications: {
                    noAlertForSkippedRuns: false,
                    onDurationWarningThresholdExceededs: ["string"],
                    onFailures: ["string"],
                    onStarts: ["string"],
                    onStreamingBacklogExceededs: ["string"],
                    onSuccesses: ["string"],
                },
                environmentKey: "string",
                existingClusterId: "string",
                genAiComputeTask: {
                    dlRuntimeImage: "string",
                    command: "string",
                    compute: {
                        gpuNodePoolId: "string",
                        numGpus: 0,
                        gpuType: "string",
                    },
                    mlflowExperimentName: "string",
                    source: "string",
                    trainingScriptPath: "string",
                    yamlParameters: "string",
                    yamlParametersFilePath: "string",
                },
                health: {
                    rules: [{
                        metric: "string",
                        op: "string",
                        value: 0,
                    }],
                },
                jobClusterKey: "string",
                notificationSettings: {
                    alertOnLastAttempt: false,
                    noAlertForCanceledRuns: false,
                    noAlertForSkippedRuns: false,
                },
                maxRetries: 0,
                cleanRoomsNotebookTask: {
                    cleanRoomName: "string",
                    notebookName: "string",
                    etag: "string",
                    notebookBaseParameters: {
                        string: "string",
                    },
                },
                dependsOns: [{
                    taskKey: "string",
                    outcome: "string",
                }],
                dbtTask: {
                    commands: ["string"],
                    catalog: "string",
                    profilesDirectory: "string",
                    projectDirectory: "string",
                    schema: "string",
                    source: "string",
                    warehouseId: "string",
                },
                libraries: [{
                    cran: {
                        "package": "string",
                        repo: "string",
                    },
                    egg: "string",
                    jar: "string",
                    maven: {
                        coordinates: "string",
                        exclusions: ["string"],
                        repo: "string",
                    },
                    pypi: {
                        "package": "string",
                        repo: "string",
                    },
                    requirements: "string",
                    whl: "string",
                }],
                pipelineTask: {
                    pipelineId: "string",
                    fullRefresh: false,
                },
                pythonWheelTask: {
                    entryPoint: "string",
                    namedParameters: {
                        string: "string",
                    },
                    packageName: "string",
                    parameters: ["string"],
                },
                retryOnTimeout: false,
                runIf: "string",
                runJobTask: {
                    jobId: 0,
                    dbtCommands: ["string"],
                    jarParams: ["string"],
                    jobParameters: {
                        string: "string",
                    },
                    notebookParams: {
                        string: "string",
                    },
                    pipelineParams: {
                        fullRefresh: false,
                    },
                    pythonNamedParams: {
                        string: "string",
                    },
                    pythonParams: ["string"],
                    sparkSubmitParams: ["string"],
                    sqlParams: {
                        string: "string",
                    },
                },
                sparkJarTask: {
                    jarUri: "string",
                    mainClassName: "string",
                    parameters: ["string"],
                    runAsRepl: false,
                },
                sparkPythonTask: {
                    pythonFile: "string",
                    parameters: ["string"],
                    source: "string",
                },
                sparkSubmitTask: {
                    parameters: ["string"],
                },
                sqlTask: {
                    warehouseId: "string",
                    alert: {
                        alertId: "string",
                        pauseSubscriptions: false,
                        subscriptions: [{
                            destinationId: "string",
                            userName: "string",
                        }],
                    },
                    dashboard: {
                        dashboardId: "string",
                        customSubject: "string",
                        pauseSubscriptions: false,
                        subscriptions: [{
                            destinationId: "string",
                            userName: "string",
                        }],
                    },
                    file: {
                        path: "string",
                        source: "string",
                    },
                    parameters: {
                        string: "string",
                    },
                    query: {
                        queryId: "string",
                    },
                },
                conditionTask: {
                    left: "string",
                    op: "string",
                    right: "string",
                },
                timeoutSeconds: 0,
                webhookNotifications: {
                    onDurationWarningThresholdExceededs: [{
                        id: "string",
                    }],
                    onFailures: [{
                        id: "string",
                    }],
                    onStarts: [{
                        id: "string",
                    }],
                    onStreamingBacklogExceededs: [{
                        id: "string",
                    }],
                    onSuccesses: [{
                        id: "string",
                    }],
                },
            },
            concurrency: 0,
        },
        genAiComputeTask: {
            dlRuntimeImage: "string",
            command: "string",
            compute: {
                gpuNodePoolId: "string",
                numGpus: 0,
                gpuType: "string",
            },
            mlflowExperimentName: "string",
            source: "string",
            trainingScriptPath: "string",
            yamlParameters: "string",
            yamlParametersFilePath: "string",
        },
        health: {
            rules: [{
                metric: "string",
                op: "string",
                value: 0,
            }],
        },
        jobClusterKey: "string",
        libraries: [{
            cran: {
                "package": "string",
                repo: "string",
            },
            egg: "string",
            jar: "string",
            maven: {
                coordinates: "string",
                exclusions: ["string"],
                repo: "string",
            },
            pypi: {
                "package": "string",
                repo: "string",
            },
            requirements: "string",
            whl: "string",
        }],
        cleanRoomsNotebookTask: {
            cleanRoomName: "string",
            notebookName: "string",
            etag: "string",
            notebookBaseParameters: {
                string: "string",
            },
        },
        webhookNotifications: {
            onDurationWarningThresholdExceededs: [{
                id: "string",
            }],
            onFailures: [{
                id: "string",
            }],
            onStarts: [{
                id: "string",
            }],
            onStreamingBacklogExceededs: [{
                id: "string",
            }],
            onSuccesses: [{
                id: "string",
            }],
        },
        dbtTask: {
            commands: ["string"],
            catalog: "string",
            profilesDirectory: "string",
            projectDirectory: "string",
            schema: "string",
            source: "string",
            warehouseId: "string",
        },
        notebookTask: {
            notebookPath: "string",
            baseParameters: {
                string: "string",
            },
            source: "string",
            warehouseId: "string",
        },
        notificationSettings: {
            alertOnLastAttempt: false,
            noAlertForCanceledRuns: false,
            noAlertForSkippedRuns: false,
        },
        pipelineTask: {
            pipelineId: "string",
            fullRefresh: false,
        },
        pythonWheelTask: {
            entryPoint: "string",
            namedParameters: {
                string: "string",
            },
            packageName: "string",
            parameters: ["string"],
        },
        retryOnTimeout: false,
        runIf: "string",
        runJobTask: {
            jobId: 0,
            dbtCommands: ["string"],
            jarParams: ["string"],
            jobParameters: {
                string: "string",
            },
            notebookParams: {
                string: "string",
            },
            pipelineParams: {
                fullRefresh: false,
            },
            pythonNamedParams: {
                string: "string",
            },
            pythonParams: ["string"],
            sparkSubmitParams: ["string"],
            sqlParams: {
                string: "string",
            },
        },
        sparkJarTask: {
            jarUri: "string",
            mainClassName: "string",
            parameters: ["string"],
            runAsRepl: false,
        },
        sparkPythonTask: {
            pythonFile: "string",
            parameters: ["string"],
            source: "string",
        },
        sparkSubmitTask: {
            parameters: ["string"],
        },
        sqlTask: {
            warehouseId: "string",
            alert: {
                alertId: "string",
                pauseSubscriptions: false,
                subscriptions: [{
                    destinationId: "string",
                    userName: "string",
                }],
            },
            dashboard: {
                dashboardId: "string",
                customSubject: "string",
                pauseSubscriptions: false,
                subscriptions: [{
                    destinationId: "string",
                    userName: "string",
                }],
            },
            file: {
                path: "string",
                source: "string",
            },
            parameters: {
                string: "string",
            },
            query: {
                queryId: "string",
            },
        },
        conditionTask: {
            left: "string",
            op: "string",
            right: "string",
        },
        timeoutSeconds: 0,
        minRetryIntervalMillis: 0,
    }],
    timeoutSeconds: 0,
    trigger: {
        fileArrival: {
            url: "string",
            minTimeBetweenTriggersSeconds: 0,
            waitAfterLastChangeSeconds: 0,
        },
        pauseStatus: "string",
        periodic: {
            interval: 0,
            unit: "string",
        },
        table: {
            condition: "string",
            minTimeBetweenTriggersSeconds: 0,
            tableNames: ["string"],
            waitAfterLastChangeSeconds: 0,
        },
        tableUpdate: {
            tableNames: ["string"],
            condition: "string",
            minTimeBetweenTriggersSeconds: 0,
            waitAfterLastChangeSeconds: 0,
        },
    },
    webhookNotifications: {
        onDurationWarningThresholdExceededs: [{
            id: "string",
        }],
        onFailures: [{
            id: "string",
        }],
        onStarts: [{
            id: "string",
        }],
        onStreamingBacklogExceededs: [{
            id: "string",
        }],
        onSuccesses: [{
            id: "string",
        }],
    },
});
type: databricks:Job
properties:
    budgetPolicyId: string
    continuous:
        pauseStatus: string
    controlRunState: false
    deployment:
        kind: string
        metadataFilePath: string
    description: string
    editMode: string
    emailNotifications:
        noAlertForSkippedRuns: false
        onDurationWarningThresholdExceededs:
            - string
        onFailures:
            - string
        onStarts:
            - string
        onStreamingBacklogExceededs:
            - string
        onSuccesses:
            - string
    environments:
        - environmentKey: string
          spec:
            client: string
            dependencies:
                - string
    existingClusterId: string
    format: string
    gitSource:
        branch: string
        commit: string
        gitSnapshot:
            usedCommit: string
        jobSource:
            dirtyState: string
            importFromGitBranch: string
            jobConfigPath: string
        provider: string
        tag: string
        url: string
    health:
        rules:
            - metric: string
              op: string
              value: 0
    jobClusters:
        - jobClusterKey: string
          newCluster:
            applyPolicyDefaultValues: false
            autoscale:
                maxWorkers: 0
                minWorkers: 0
            awsAttributes:
                availability: string
                ebsVolumeCount: 0
                ebsVolumeIops: 0
                ebsVolumeSize: 0
                ebsVolumeThroughput: 0
                ebsVolumeType: string
                firstOnDemand: 0
                instanceProfileArn: string
                spotBidPricePercent: 0
                zoneId: string
            azureAttributes:
                availability: string
                firstOnDemand: 0
                logAnalyticsInfo:
                    logAnalyticsPrimaryKey: string
                    logAnalyticsWorkspaceId: string
                spotBidMaxPrice: 0
            clusterId: string
            clusterLogConf:
                dbfs:
                    destination: string
                s3:
                    cannedAcl: string
                    destination: string
                    enableEncryption: false
                    encryptionType: string
                    endpoint: string
                    kmsKey: string
                    region: string
                volumes:
                    destination: string
            clusterMountInfos:
                - localMountDirPath: string
                  networkFilesystemInfo:
                    mountOptions: string
                    serverAddress: string
                  remoteMountDirPath: string
            clusterName: string
            customTags:
                string: string
            dataSecurityMode: string
            dockerImage:
                basicAuth:
                    password: string
                    username: string
                url: string
            driverInstancePoolId: string
            driverNodeTypeId: string
            enableElasticDisk: false
            enableLocalDiskEncryption: false
            gcpAttributes:
                availability: string
                bootDiskSize: 0
                googleServiceAccount: string
                localSsdCount: 0
                usePreemptibleExecutors: false
                zoneId: string
            idempotencyToken: string
            initScripts:
                - abfss:
                    destination: string
                  file:
                    destination: string
                  gcs:
                    destination: string
                  s3:
                    cannedAcl: string
                    destination: string
                    enableEncryption: false
                    encryptionType: string
                    endpoint: string
                    kmsKey: string
                    region: string
                  volumes:
                    destination: string
                  workspace:
                    destination: string
            instancePoolId: string
            isSingleNode: false
            kind: string
            libraries:
                - cran:
                    package: string
                    repo: string
                  egg: string
                  jar: string
                  maven:
                    coordinates: string
                    exclusions:
                        - string
                    repo: string
                  pypi:
                    package: string
                    repo: string
                  requirements: string
                  whl: string
            nodeTypeId: string
            numWorkers: 0
            policyId: string
            runtimeEngine: string
            singleUserName: string
            sparkConf:
                string: string
            sparkEnvVars:
                string: string
            sparkVersion: string
            sshPublicKeys:
                - string
            useMlRuntime: false
            workloadType:
                clients:
                    jobs: false
                    notebooks: false
    libraries:
        - cran:
            package: string
            repo: string
          egg: string
          jar: string
          maven:
            coordinates: string
            exclusions:
                - string
            repo: string
          pypi:
            package: string
            repo: string
          requirements: string
          whl: string
    maxConcurrentRuns: 0
    name: string
    newCluster:
        applyPolicyDefaultValues: false
        autoscale:
            maxWorkers: 0
            minWorkers: 0
        awsAttributes:
            availability: string
            ebsVolumeCount: 0
            ebsVolumeIops: 0
            ebsVolumeSize: 0
            ebsVolumeThroughput: 0
            ebsVolumeType: string
            firstOnDemand: 0
            instanceProfileArn: string
            spotBidPricePercent: 0
            zoneId: string
        azureAttributes:
            availability: string
            firstOnDemand: 0
            logAnalyticsInfo:
                logAnalyticsPrimaryKey: string
                logAnalyticsWorkspaceId: string
            spotBidMaxPrice: 0
        clusterId: string
        clusterLogConf:
            dbfs:
                destination: string
            s3:
                cannedAcl: string
                destination: string
                enableEncryption: false
                encryptionType: string
                endpoint: string
                kmsKey: string
                region: string
            volumes:
                destination: string
        clusterMountInfos:
            - localMountDirPath: string
              networkFilesystemInfo:
                mountOptions: string
                serverAddress: string
              remoteMountDirPath: string
        clusterName: string
        customTags:
            string: string
        dataSecurityMode: string
        dockerImage:
            basicAuth:
                password: string
                username: string
            url: string
        driverInstancePoolId: string
        driverNodeTypeId: string
        enableElasticDisk: false
        enableLocalDiskEncryption: false
        gcpAttributes:
            availability: string
            bootDiskSize: 0
            googleServiceAccount: string
            localSsdCount: 0
            usePreemptibleExecutors: false
            zoneId: string
        idempotencyToken: string
        initScripts:
            - abfss:
                destination: string
              file:
                destination: string
              gcs:
                destination: string
              s3:
                cannedAcl: string
                destination: string
                enableEncryption: false
                encryptionType: string
                endpoint: string
                kmsKey: string
                region: string
              volumes:
                destination: string
              workspace:
                destination: string
        instancePoolId: string
        isSingleNode: false
        kind: string
        libraries:
            - cran:
                package: string
                repo: string
              egg: string
              jar: string
              maven:
                coordinates: string
                exclusions:
                    - string
                repo: string
              pypi:
                package: string
                repo: string
              requirements: string
              whl: string
        nodeTypeId: string
        numWorkers: 0
        policyId: string
        runtimeEngine: string
        singleUserName: string
        sparkConf:
            string: string
        sparkEnvVars:
            string: string
        sparkVersion: string
        sshPublicKeys:
            - string
        useMlRuntime: false
        workloadType:
            clients:
                jobs: false
                notebooks: false
    notificationSettings:
        noAlertForCanceledRuns: false
        noAlertForSkippedRuns: false
    parameters:
        - default: string
          name: string
    performanceTarget: string
    queue:
        enabled: false
    runAs:
        servicePrincipalName: string
        userName: string
    schedule:
        pauseStatus: string
        quartzCronExpression: string
        timezoneId: string
    tags:
        string: string
    tasks:
        - cleanRoomsNotebookTask:
            cleanRoomName: string
            etag: string
            notebookBaseParameters:
                string: string
            notebookName: string
          conditionTask:
            left: string
            op: string
            right: string
          dbtTask:
            catalog: string
            commands:
                - string
            profilesDirectory: string
            projectDirectory: string
            schema: string
            source: string
            warehouseId: string
          dependsOns:
            - outcome: string
              taskKey: string
          description: string
          disableAutoOptimization: false
          emailNotifications:
            noAlertForSkippedRuns: false
            onDurationWarningThresholdExceededs:
                - string
            onFailures:
                - string
            onStarts:
                - string
            onStreamingBacklogExceededs:
                - string
            onSuccesses:
                - string
          environmentKey: string
          existingClusterId: string
          forEachTask:
            concurrency: 0
            inputs: string
            task:
                cleanRoomsNotebookTask:
                    cleanRoomName: string
                    etag: string
                    notebookBaseParameters:
                        string: string
                    notebookName: string
                conditionTask:
                    left: string
                    op: string
                    right: string
                dbtTask:
                    catalog: string
                    commands:
                        - string
                    profilesDirectory: string
                    projectDirectory: string
                    schema: string
                    source: string
                    warehouseId: string
                dependsOns:
                    - outcome: string
                      taskKey: string
                description: string
                disableAutoOptimization: false
                emailNotifications:
                    noAlertForSkippedRuns: false
                    onDurationWarningThresholdExceededs:
                        - string
                    onFailures:
                        - string
                    onStarts:
                        - string
                    onStreamingBacklogExceededs:
                        - string
                    onSuccesses:
                        - string
                environmentKey: string
                existingClusterId: string
                genAiComputeTask:
                    command: string
                    compute:
                        gpuNodePoolId: string
                        gpuType: string
                        numGpus: 0
                    dlRuntimeImage: string
                    mlflowExperimentName: string
                    source: string
                    trainingScriptPath: string
                    yamlParameters: string
                    yamlParametersFilePath: string
                health:
                    rules:
                        - metric: string
                          op: string
                          value: 0
                jobClusterKey: string
                libraries:
                    - cran:
                        package: string
                        repo: string
                      egg: string
                      jar: string
                      maven:
                        coordinates: string
                        exclusions:
                            - string
                        repo: string
                      pypi:
                        package: string
                        repo: string
                      requirements: string
                      whl: string
                maxRetries: 0
                minRetryIntervalMillis: 0
                newCluster:
                    applyPolicyDefaultValues: false
                    autoscale:
                        maxWorkers: 0
                        minWorkers: 0
                    awsAttributes:
                        availability: string
                        ebsVolumeCount: 0
                        ebsVolumeIops: 0
                        ebsVolumeSize: 0
                        ebsVolumeThroughput: 0
                        ebsVolumeType: string
                        firstOnDemand: 0
                        instanceProfileArn: string
                        spotBidPricePercent: 0
                        zoneId: string
                    azureAttributes:
                        availability: string
                        firstOnDemand: 0
                        logAnalyticsInfo:
                            logAnalyticsPrimaryKey: string
                            logAnalyticsWorkspaceId: string
                        spotBidMaxPrice: 0
                    clusterId: string
                    clusterLogConf:
                        dbfs:
                            destination: string
                        s3:
                            cannedAcl: string
                            destination: string
                            enableEncryption: false
                            encryptionType: string
                            endpoint: string
                            kmsKey: string
                            region: string
                        volumes:
                            destination: string
                    clusterMountInfos:
                        - localMountDirPath: string
                          networkFilesystemInfo:
                            mountOptions: string
                            serverAddress: string
                          remoteMountDirPath: string
                    clusterName: string
                    customTags:
                        string: string
                    dataSecurityMode: string
                    dockerImage:
                        basicAuth:
                            password: string
                            username: string
                        url: string
                    driverInstancePoolId: string
                    driverNodeTypeId: string
                    enableElasticDisk: false
                    enableLocalDiskEncryption: false
                    gcpAttributes:
                        availability: string
                        bootDiskSize: 0
                        googleServiceAccount: string
                        localSsdCount: 0
                        usePreemptibleExecutors: false
                        zoneId: string
                    idempotencyToken: string
                    initScripts:
                        - abfss:
                            destination: string
                          file:
                            destination: string
                          gcs:
                            destination: string
                          s3:
                            cannedAcl: string
                            destination: string
                            enableEncryption: false
                            encryptionType: string
                            endpoint: string
                            kmsKey: string
                            region: string
                          volumes:
                            destination: string
                          workspace:
                            destination: string
                    instancePoolId: string
                    isSingleNode: false
                    kind: string
                    libraries:
                        - cran:
                            package: string
                            repo: string
                          egg: string
                          jar: string
                          maven:
                            coordinates: string
                            exclusions:
                                - string
                            repo: string
                          pypi:
                            package: string
                            repo: string
                          requirements: string
                          whl: string
                    nodeTypeId: string
                    numWorkers: 0
                    policyId: string
                    runtimeEngine: string
                    singleUserName: string
                    sparkConf:
                        string: string
                    sparkEnvVars:
                        string: string
                    sparkVersion: string
                    sshPublicKeys:
                        - string
                    useMlRuntime: false
                    workloadType:
                        clients:
                            jobs: false
                            notebooks: false
                notebookTask:
                    baseParameters:
                        string: string
                    notebookPath: string
                    source: string
                    warehouseId: string
                notificationSettings:
                    alertOnLastAttempt: false
                    noAlertForCanceledRuns: false
                    noAlertForSkippedRuns: false
                pipelineTask:
                    fullRefresh: false
                    pipelineId: string
                pythonWheelTask:
                    entryPoint: string
                    namedParameters:
                        string: string
                    packageName: string
                    parameters:
                        - string
                retryOnTimeout: false
                runIf: string
                runJobTask:
                    dbtCommands:
                        - string
                    jarParams:
                        - string
                    jobId: 0
                    jobParameters:
                        string: string
                    notebookParams:
                        string: string
                    pipelineParams:
                        fullRefresh: false
                    pythonNamedParams:
                        string: string
                    pythonParams:
                        - string
                    sparkSubmitParams:
                        - string
                    sqlParams:
                        string: string
                sparkJarTask:
                    jarUri: string
                    mainClassName: string
                    parameters:
                        - string
                    runAsRepl: false
                sparkPythonTask:
                    parameters:
                        - string
                    pythonFile: string
                    source: string
                sparkSubmitTask:
                    parameters:
                        - string
                sqlTask:
                    alert:
                        alertId: string
                        pauseSubscriptions: false
                        subscriptions:
                            - destinationId: string
                              userName: string
                    dashboard:
                        customSubject: string
                        dashboardId: string
                        pauseSubscriptions: false
                        subscriptions:
                            - destinationId: string
                              userName: string
                    file:
                        path: string
                        source: string
                    parameters:
                        string: string
                    query:
                        queryId: string
                    warehouseId: string
                taskKey: string
                timeoutSeconds: 0
                webhookNotifications:
                    onDurationWarningThresholdExceededs:
                        - id: string
                    onFailures:
                        - id: string
                    onStarts:
                        - id: string
                    onStreamingBacklogExceededs:
                        - id: string
                    onSuccesses:
                        - id: string
          genAiComputeTask:
            command: string
            compute:
                gpuNodePoolId: string
                gpuType: string
                numGpus: 0
            dlRuntimeImage: string
            mlflowExperimentName: string
            source: string
            trainingScriptPath: string
            yamlParameters: string
            yamlParametersFilePath: string
          health:
            rules:
                - metric: string
                  op: string
                  value: 0
          jobClusterKey: string
          libraries:
            - cran:
                package: string
                repo: string
              egg: string
              jar: string
              maven:
                coordinates: string
                exclusions:
                    - string
                repo: string
              pypi:
                package: string
                repo: string
              requirements: string
              whl: string
          maxRetries: 0
          minRetryIntervalMillis: 0
          newCluster:
            applyPolicyDefaultValues: false
            autoscale:
                maxWorkers: 0
                minWorkers: 0
            awsAttributes:
                availability: string
                ebsVolumeCount: 0
                ebsVolumeIops: 0
                ebsVolumeSize: 0
                ebsVolumeThroughput: 0
                ebsVolumeType: string
                firstOnDemand: 0
                instanceProfileArn: string
                spotBidPricePercent: 0
                zoneId: string
            azureAttributes:
                availability: string
                firstOnDemand: 0
                logAnalyticsInfo:
                    logAnalyticsPrimaryKey: string
                    logAnalyticsWorkspaceId: string
                spotBidMaxPrice: 0
            clusterId: string
            clusterLogConf:
                dbfs:
                    destination: string
                s3:
                    cannedAcl: string
                    destination: string
                    enableEncryption: false
                    encryptionType: string
                    endpoint: string
                    kmsKey: string
                    region: string
                volumes:
                    destination: string
            clusterMountInfos:
                - localMountDirPath: string
                  networkFilesystemInfo:
                    mountOptions: string
                    serverAddress: string
                  remoteMountDirPath: string
            clusterName: string
            customTags:
                string: string
            dataSecurityMode: string
            dockerImage:
                basicAuth:
                    password: string
                    username: string
                url: string
            driverInstancePoolId: string
            driverNodeTypeId: string
            enableElasticDisk: false
            enableLocalDiskEncryption: false
            gcpAttributes:
                availability: string
                bootDiskSize: 0
                googleServiceAccount: string
                localSsdCount: 0
                usePreemptibleExecutors: false
                zoneId: string
            idempotencyToken: string
            initScripts:
                - abfss:
                    destination: string
                  file:
                    destination: string
                  gcs:
                    destination: string
                  s3:
                    cannedAcl: string
                    destination: string
                    enableEncryption: false
                    encryptionType: string
                    endpoint: string
                    kmsKey: string
                    region: string
                  volumes:
                    destination: string
                  workspace:
                    destination: string
            instancePoolId: string
            isSingleNode: false
            kind: string
            libraries:
                - cran:
                    package: string
                    repo: string
                  egg: string
                  jar: string
                  maven:
                    coordinates: string
                    exclusions:
                        - string
                    repo: string
                  pypi:
                    package: string
                    repo: string
                  requirements: string
                  whl: string
            nodeTypeId: string
            numWorkers: 0
            policyId: string
            runtimeEngine: string
            singleUserName: string
            sparkConf:
                string: string
            sparkEnvVars:
                string: string
            sparkVersion: string
            sshPublicKeys:
                - string
            useMlRuntime: false
            workloadType:
                clients:
                    jobs: false
                    notebooks: false
          notebookTask:
            baseParameters:
                string: string
            notebookPath: string
            source: string
            warehouseId: string
          notificationSettings:
            alertOnLastAttempt: false
            noAlertForCanceledRuns: false
            noAlertForSkippedRuns: false
          pipelineTask:
            fullRefresh: false
            pipelineId: string
          pythonWheelTask:
            entryPoint: string
            namedParameters:
                string: string
            packageName: string
            parameters:
                - string
          retryOnTimeout: false
          runIf: string
          runJobTask:
            dbtCommands:
                - string
            jarParams:
                - string
            jobId: 0
            jobParameters:
                string: string
            notebookParams:
                string: string
            pipelineParams:
                fullRefresh: false
            pythonNamedParams:
                string: string
            pythonParams:
                - string
            sparkSubmitParams:
                - string
            sqlParams:
                string: string
          sparkJarTask:
            jarUri: string
            mainClassName: string
            parameters:
                - string
            runAsRepl: false
          sparkPythonTask:
            parameters:
                - string
            pythonFile: string
            source: string
          sparkSubmitTask:
            parameters:
                - string
          sqlTask:
            alert:
                alertId: string
                pauseSubscriptions: false
                subscriptions:
                    - destinationId: string
                      userName: string
            dashboard:
                customSubject: string
                dashboardId: string
                pauseSubscriptions: false
                subscriptions:
                    - destinationId: string
                      userName: string
            file:
                path: string
                source: string
            parameters:
                string: string
            query:
                queryId: string
            warehouseId: string
          taskKey: string
          timeoutSeconds: 0
          webhookNotifications:
            onDurationWarningThresholdExceededs:
                - id: string
            onFailures:
                - id: string
            onStarts:
                - id: string
            onStreamingBacklogExceededs:
                - id: string
            onSuccesses:
                - id: string
    timeoutSeconds: 0
    trigger:
        fileArrival:
            minTimeBetweenTriggersSeconds: 0
            url: string
            waitAfterLastChangeSeconds: 0
        pauseStatus: string
        periodic:
            interval: 0
            unit: string
        table:
            condition: string
            minTimeBetweenTriggersSeconds: 0
            tableNames:
                - string
            waitAfterLastChangeSeconds: 0
        tableUpdate:
            condition: string
            minTimeBetweenTriggersSeconds: 0
            tableNames:
                - string
            waitAfterLastChangeSeconds: 0
    webhookNotifications:
        onDurationWarningThresholdExceededs:
            - id: string
        onFailures:
            - id: string
        onStarts:
            - id: string
        onStreamingBacklogExceededs:
            - id: string
        onSuccesses:
            - id: string

Job Resource Properties

To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

Inputs

In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.

The Job resource accepts the following input properties:

AlwaysRunning bool
(Bool) Whether the job should always be running, like a Spark Streaming application: on every update, restart the current active run or start it again if nothing is running. False by default. Any job runs are started with parameters specified in the spark_jar_task, spark_submit_task, spark_python_task, or notebook_task blocks.

Deprecated: always_running will be replaced by control_run_state in the next major release.

BudgetPolicyId string
The ID of the user-specified budget policy to use for this job. If not specified, a default budget policy may be applied when creating or modifying the job.
Continuous JobContinuous
Configuration block for the pause status of a continuous job. See continuous Configuration Block.
ControlRunState bool

(Bool) If true, the Databricks provider will stop and start the job as needed to ensure that the active run for the job reflects the deployed configuration. For continuous jobs, the provider respects the pause_status by stopping the current active run. This flag cannot be set for non-continuous jobs.

When migrating from always_running to control_run_state, set continuous as follows:
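For illustration, here is a minimal sketch of a continuous job using control_run_state; the resource name and the pause status value are assumptions for this example, not a definitive migration recipe.

import * as databricks from "@pulumi/databricks";

// Sketch: replacing always_running with control_run_state on a continuous job.
const streaming = new databricks.Job("streaming", {
    name: "Continuous streaming job",
    controlRunState: true,
    continuous: {
        pauseStatus: "UNPAUSED", // keep the continuous run active
    },
    // tasks omitted for brevity
});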

DbtTask JobDbtTask

Deprecated: should be used inside a task block and not inside a job block

Deployment JobDeployment
Description string
An optional description for the job. The maximum length is 1024 characters in UTF-8 encoding.
EditMode string
If "UI_LOCKED", the user interface for the job will be locked. If "EDITABLE" (the default), the user interface will be editable.
EmailNotifications JobEmailNotifications
(List) An optional set of email addresses notified when runs of this job begin, complete, or fail. The default behavior is to not send any emails. This field is a block and is documented below.
Environments List<JobEnvironment>
ExistingClusterId string
Format string
GitSource JobGitSource
Specifies a Git repository for task source code. See git_source Configuration Block below.
Health JobHealth
An optional block that specifies the health conditions for the job, documented below.
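As a hedged sketch, a duration-based health rule might look like the following; the metric name and threshold are assumptions for illustration.

import * as databricks from "@pulumi/databricks";

const monitored = new databricks.Job("monitored", {
    name: "Job with a health rule",
    health: {
        rules: [{
            metric: "RUN_DURATION_SECONDS", // assumed metric name
            op: "GREATER_THAN",
            value: 3600, // flag runs that exceed one hour
        }],
    },
});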
JobClusters List<JobJobCluster>
A list of job databricks.Cluster specifications that can be shared and reused by tasks of this job. Libraries cannot be declared in a shared job cluster; you must declare dependent libraries in task settings (multi-task syntax), as shown in the sketch below.
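A minimal sketch of a shared job cluster with a task-level library; the Spark version, node type, package, and notebook path are illustrative assumptions.

import * as databricks from "@pulumi/databricks";

const sharedClusterJob = new databricks.Job("shared-cluster-job", {
    name: "Job with a shared job cluster",
    jobClusters: [{
        jobClusterKey: "shared",
        newCluster: {
            sparkVersion: "15.4.x-scala2.12", // assumed runtime version
            nodeTypeId: "i3.xlarge",          // assumed node type
            numWorkers: 2,
        },
    }],
    tasks: [{
        taskKey: "etl",
        jobClusterKey: "shared",
        // Libraries must be declared on the task, not on the shared job cluster.
        libraries: [{
            pypi: {
                "package": "requests",
            },
        }],
        notebookTask: {
            notebookPath: "/Shared/etl", // assumed workspace path
        },
    }],
});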
Libraries List<JobLibrary>
(List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
MaxConcurrentRuns int
(Integer) An optional maximum allowed number of concurrent runs of the job. Defaults to 1.
MaxRetries int

Deprecated: should be used inside a task block and not inside a job block

MinRetryIntervalMillis int
(Integer) An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried.

Deprecated: should be used inside a task block and not inside a job block

Name string
An optional name for the job. The default value is Untitled.
NewCluster JobNewCluster
NotebookTask JobNotebookTask

Deprecated: should be used inside a task block and not inside a job block

NotificationSettings JobNotificationSettings
An optional block controlling the notification settings on the job level documented below.
Parameters List<JobParameter>
Specifies job parameters for the job. See parameter Configuration Block.
PerformanceTarget string
PipelineTask JobPipelineTask

Deprecated: should be used inside a task block and not inside a job block

PythonWheelTask JobPythonWheelTask

Deprecated: should be used inside a task block and not inside a job block

Queue JobQueue
The queue status for the job. See queue Configuration Block below.
RetryOnTimeout bool

Deprecated: should be used inside a task block and not inside a job block

RunAs JobRunAs
The user or the service principal the job runs as. See run_as Configuration Block below.
RunJobTask JobRunJobTask

Deprecated: should be used inside a task block and not inside a job block

Schedule JobSchedule
An optional periodic schedule for this job. The default behavior is that the job runs when triggered by clicking Run Now in the Jobs UI or sending an API request to runNow. See schedule Configuration Block below.
SparkJarTask JobSparkJarTask

Deprecated: should be used inside a task block and not inside a job block

SparkPythonTask JobSparkPythonTask

Deprecated: should be used inside a task block and not inside a job block

SparkSubmitTask JobSparkSubmitTask

Deprecated: should be used inside a task block and not inside a job block

Tags Dictionary<string, string>
An optional map of the tags associated with the job. See tags Configuration Map
Tasks List<JobTask>
A list of task specifications that the job will execute. See task Configuration Block below.
TimeoutSeconds int
(Integer) An optional timeout applied to each run of this job. The default behavior is to have no timeout.
Trigger JobTrigger
The conditions that trigger the job to start. See trigger Configuration Block below.
WebhookNotifications JobWebhookNotifications
(List) An optional set of system destinations (for example, webhook destinations or Slack) to be notified when runs of this job begin, complete, or fail. The default behavior is to not send any notifications. This field is a block and is documented below.
AlwaysRunning bool
(Bool) Whether the job should always be running, like a Spark Streaming application: on every update, the provider restarts the current active run, or starts a new run if none is running. False by default. Job runs are started with the parameters specified in the spark_jar_task, spark_submit_task, spark_python_task, or notebook_task blocks.

Deprecated: always_running will be replaced by control_run_state in the next major release.

BudgetPolicyId string
The ID of the user-specified budget policy to use for this job. If not specified, a default budget policy may be applied when creating or modifying the job.
Continuous JobContinuousArgs
Configuration block to configure pause status. See continuous Configuration Block.
ControlRunState bool

(Bool) If true, the Databricks provider will stop and start the job as needed to ensure that the active run for the job reflects the deployed configuration. For continuous jobs, the provider respects the pause_status by stopping the current active run. This flag cannot be set for non-continuous jobs.

When migrating from always_running to control_run_state, set continuous as follows:

DbtTask JobDbtTaskArgs

Deprecated: should be used inside a task block and not inside a job block

Deployment JobDeploymentArgs
Description string
An optional description for the job. The maximum length is 1024 characters in UTF-8 encoding.
EditMode string
If "UI_LOCKED", the user interface for the job will be locked. If "EDITABLE" (the default), the user interface will be editable.
EmailNotifications JobEmailNotificationsArgs
(List) An optional set of email addresses notified when runs of this job begin, complete, or fail. The default behavior is to not send any emails. This field is a block and is documented below.
Environments []JobEnvironmentArgs
ExistingClusterId string
Format string
GitSource JobGitSourceArgs
Specifies a Git repository for task source code. See git_source Configuration Block below.
Health JobHealthArgs
An optional block that specifies the health conditions for the job documented below.
JobClusters []JobJobClusterArgs
A list of job databricks.Cluster specifications that can be shared and reused by tasks of this job. Libraries cannot be declared in a shared job cluster. You must declare dependent libraries in task settings. Multi-task syntax
Libraries []JobLibraryArgs
(List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
MaxConcurrentRuns int
(Integer) An optional maximum allowed number of concurrent runs of the job. Defaults to 1.
MaxRetries int

Deprecated: should be used inside a task block and not inside a job block

MinRetryIntervalMillis int
(Integer) An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried.

Deprecated: should be used inside a task block and not inside a job block

Name string
An optional name for the job. The default value is Untitled.
NewCluster JobNewClusterArgs
NotebookTask JobNotebookTaskArgs

Deprecated: should be used inside a task block and not inside a job block

NotificationSettings JobNotificationSettingsArgs
An optional block controlling the notification settings on the job level documented below.
Parameters []JobParameterArgs
Specifies job parameters for the job. See parameter Configuration Block.
PerformanceTarget string
PipelineTask JobPipelineTaskArgs

Deprecated: should be used inside a task block and not inside a job block

PythonWheelTask JobPythonWheelTaskArgs

Deprecated: should be used inside a task block and not inside a job block

Queue JobQueueArgs
The queue status for the job. See queue Configuration Block below.
RetryOnTimeout bool

Deprecated: should be used inside a task block and not inside a job block

RunAs JobRunAsArgs
The user or the service principal the job runs as. See run_as Configuration Block below.
RunJobTask JobRunJobTaskArgs

Deprecated: should be used inside a task block and not inside a job block

Schedule JobScheduleArgs
An optional periodic schedule for this job. The default behavior is that the job runs when triggered by clicking Run Now in the Jobs UI or sending an API request to runNow. See schedule Configuration Block below.
SparkJarTask JobSparkJarTaskArgs

Deprecated: should be used inside a task block and not inside a job block

SparkPythonTask JobSparkPythonTaskArgs

Deprecated: should be used inside a task block and not inside a job block

SparkSubmitTask JobSparkSubmitTaskArgs

Deprecated: should be used inside a task block and not inside a job block

Tags map[string]string
An optional map of the tags associated with the job. See tags Configuration Map
Tasks []JobTaskArgs
A list of task specifications that the job will execute. See task Configuration Block below.
TimeoutSeconds int
(Integer) An optional timeout applied to each run of this job. The default behavior is to have no timeout.
Trigger JobTriggerArgs
The conditions that trigger the job to start. See trigger Configuration Block below.
WebhookNotifications JobWebhookNotificationsArgs
(List) An optional set of system destinations (for example, webhook destinations or Slack) to be notified when runs of this job begin, complete, or fail. The default behavior is to not send any notifications. This field is a block and is documented below.
alwaysRunning Boolean
(Bool) Whether the job should always be running, like a Spark Streaming application: on every update, the provider restarts the current active run, or starts a new run if none is running. False by default. Job runs are started with the parameters specified in the spark_jar_task, spark_submit_task, spark_python_task, or notebook_task blocks.

Deprecated: always_running will be replaced by control_run_state in the next major release.

budgetPolicyId String
The ID of the user-specified budget policy to use for this job. If not specified, a default budget policy may be applied when creating or modifying the job.
continuous JobContinuous
Configuration block to configure pause status. See continuous Configuration Block.
controlRunState Boolean

(Bool) If true, the Databricks provider will stop and start the job as needed to ensure that the active run for the job reflects the deployed configuration. For continuous jobs, the provider respects the pause_status by stopping the current active run. This flag cannot be set for non-continuous jobs.

When migrating from always_running to control_run_state, set continuous as follows:

dbtTask JobDbtTask

Deprecated: should be used inside a task block and not inside a job block

deployment JobDeployment
description String
An optional description for the job. The maximum length is 1024 characters in UTF-8 encoding.
editMode String
If "UI_LOCKED", the user interface for the job will be locked. If "EDITABLE" (the default), the user interface will be editable.
emailNotifications JobEmailNotifications
(List) An optional set of email addresses notified when runs of this job begin, complete, or fail. The default behavior is to not send any emails. This field is a block and is documented below.
environments List<JobEnvironment>
existingClusterId String
format String
gitSource JobGitSource
Specifies a Git repository for task source code. See git_source Configuration Block below.
health JobHealth
An optional block that specifies the health conditions for the job documented below.
jobClusters List<JobJobCluster>
A list of job databricks.Cluster specifications that can be shared and reused by tasks of this job. Libraries cannot be declared in a shared job cluster. You must declare dependent libraries in task settings. Multi-task syntax
libraries List<JobLibrary>
(List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
maxConcurrentRuns Integer
(Integer) An optional maximum allowed number of concurrent runs of the job. Defaults to 1.
maxRetries Integer

Deprecated: should be used inside a task block and not inside a job block

minRetryIntervalMillis Integer
(Integer) An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried.

Deprecated: should be used inside a task block and not inside a job block

name String
An optional name for the job. The default value is Untitled.
newCluster JobNewCluster
notebookTask JobNotebookTask

Deprecated: should be used inside a task block and not inside a job block

notificationSettings JobNotificationSettings
An optional block controlling the notification settings on the job level documented below.
parameters List<JobParameter>
Specifies job parameters for the job. See parameter Configuration Block.
performanceTarget String
pipelineTask JobPipelineTask

Deprecated: should be used inside a task block and not inside a job block

pythonWheelTask JobPythonWheelTask

Deprecated: should be used inside a task block and not inside a job block

queue JobQueue
The queue status for the job. See queue Configuration Block below.
retryOnTimeout Boolean

Deprecated: should be used inside a task block and not inside a job block

runAs JobRunAs
The user or the service principal the job runs as. See run_as Configuration Block below.
runJobTask JobRunJobTask

Deprecated: should be used inside a task block and not inside a job block

schedule JobSchedule
An optional periodic schedule for this job. The default behavior is that the job runs when triggered by clicking Run Now in the Jobs UI or sending an API request to runNow. See schedule Configuration Block below.
sparkJarTask JobSparkJarTask

Deprecated: should be used inside a task block and not inside a job block

sparkPythonTask JobSparkPythonTask

Deprecated: should be used inside a task block and not inside a job block

sparkSubmitTask JobSparkSubmitTask

Deprecated: should be used inside a task block and not inside a job block

tags Map<String,String>
An optional map of the tags associated with the job. See tags Configuration Map
tasks List<JobTask>
A list of task specifications that the job will execute. See task Configuration Block below.
timeoutSeconds Integer
(Integer) An optional timeout applied to each run of this job. The default behavior is to have no timeout.
trigger JobTrigger
The conditions that trigger the job to start. See trigger Configuration Block below.
webhookNotifications JobWebhookNotifications
(List) An optional set of system destinations (for example, webhook destinations or Slack) to be notified when runs of this job begin, complete, or fail. The default behavior is to not send any notifications. This field is a block and is documented below.
alwaysRunning boolean
(Bool) Whether the job should always be running, like a Spark Streaming application: on every update, the provider restarts the current active run, or starts a new run if none is running. False by default. Job runs are started with the parameters specified in the spark_jar_task, spark_submit_task, spark_python_task, or notebook_task blocks.

Deprecated: always_running will be replaced by control_run_state in the next major release.

budgetPolicyId string
The ID of the user-specified budget policy to use for this job. If not specified, a default budget policy may be applied when creating or modifying the job.
continuous JobContinuous
Configuration block to configure pause status. See continuous Configuration Block.
controlRunState boolean

(Bool) If true, the Databricks provider will stop and start the job as needed to ensure that the active run for the job reflects the deployed configuration. For continuous jobs, the provider respects the pause_status by stopping the current active run. This flag cannot be set for non-continuous jobs.

When migrating from always_running to control_run_state, set continuous as follows:

dbtTask JobDbtTask

Deprecated: should be used inside a task block and not inside a job block

deployment JobDeployment
description string
An optional description for the job. The maximum length is 1024 characters in UTF-8 encoding.
editMode string
If "UI_LOCKED", the user interface for the job will be locked. If "EDITABLE" (the default), the user interface will be editable.
emailNotifications JobEmailNotifications
(List) An optional set of email addresses notified when runs of this job begin, complete, or fail. The default behavior is to not send any emails. This field is a block and is documented below.
environments JobEnvironment[]
existingClusterId string
format string
gitSource JobGitSource
Specifies a Git repository for task source code. See git_source Configuration Block below.
health JobHealth
An optional block that specifies the health conditions for the job documented below.
jobClusters JobJobCluster[]
A list of job databricks.Cluster specifications that can be shared and reused by tasks of this job. Libraries cannot be declared in a shared job cluster. You must declare dependent libraries in task settings. Multi-task syntax
libraries JobLibrary[]
(List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
maxConcurrentRuns number
(Integer) An optional maximum allowed number of concurrent runs of the job. Defaults to 1.
maxRetries number

Deprecated: should be used inside a task block and not inside a job block

minRetryIntervalMillis number
(Integer) An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried.

Deprecated: should be used inside a task block and not inside a job block

name string
An optional name for the job. The default value is Untitled.
newCluster JobNewCluster
notebookTask JobNotebookTask

Deprecated: should be used inside a task block and not inside a job block

notificationSettings JobNotificationSettings
An optional block controlling the notification settings on the job level documented below.
parameters JobParameter[]
Specifies job parameters for the job. See parameter Configuration Block.
performanceTarget string
pipelineTask JobPipelineTask

Deprecated: should be used inside a task block and not inside a job block

pythonWheelTask JobPythonWheelTask

Deprecated: should be used inside a task block and not inside a job block

queue JobQueue
The queue status for the job. See queue Configuration Block below.
retryOnTimeout boolean

Deprecated: should be used inside a task block and not inside a job block

runAs JobRunAs
The user or the service principal the job runs as. See run_as Configuration Block below.
runJobTask JobRunJobTask

Deprecated: should be used inside a task block and not inside a job block

schedule JobSchedule
An optional periodic schedule for this job. The default behavior is that the job runs when triggered by clicking Run Now in the Jobs UI or sending an API request to runNow. See schedule Configuration Block below.
sparkJarTask JobSparkJarTask

Deprecated: should be used inside a task block and not inside a job block

sparkPythonTask JobSparkPythonTask

Deprecated: should be used inside a task block and not inside a job block

sparkSubmitTask JobSparkSubmitTask

Deprecated: should be used inside a task block and not inside a job block

tags {[key: string]: string}
An optional map of the tags associated with the job. See tags Configuration Map
tasks JobTask[]
A list of task specifications that the job will execute. See task Configuration Block below.
timeoutSeconds number
(Integer) An optional timeout applied to each run of this job. The default behavior is to have no timeout.
trigger JobTrigger
The conditions that trigger the job to start. See trigger Configuration Block below.
webhookNotifications JobWebhookNotifications
(List) An optional set of system destinations (for example, webhook destinations or Slack) to be notified when runs of this job begin, complete, or fail. The default behavior is to not send any notifications. This field is a block and is documented below.
always_running bool
(Bool) Whether the job should always be running, like a Spark Streaming application: on every update, the provider restarts the current active run, or starts a new run if none is running. False by default. Job runs are started with the parameters specified in the spark_jar_task, spark_submit_task, spark_python_task, or notebook_task blocks.

Deprecated: always_running will be replaced by control_run_state in the next major release.

budget_policy_id str
The ID of the user-specified budget policy to use for this job. If not specified, a default budget policy may be applied when creating or modifying the job.
continuous JobContinuousArgs
Configuration block to configure pause status. See continuous Configuration Block.
control_run_state bool

(Bool) If true, the Databricks provider will stop and start the job as needed to ensure that the active run for the job reflects the deployed configuration. For continuous jobs, the provider respects the pause_status by stopping the current active run. This flag cannot be set for non-continuous jobs.

When migrating from always_running to control_run_state, set continuous as follows:

dbt_task JobDbtTaskArgs

Deprecated: should be used inside a task block and not inside a job block

deployment JobDeploymentArgs
description str
An optional description for the job. The maximum length is 1024 characters in UTF-8 encoding.
edit_mode str
If "UI_LOCKED", the user interface for the job will be locked. If "EDITABLE" (the default), the user interface will be editable.
email_notifications JobEmailNotificationsArgs
(List) An optional set of email addresses notified when runs of this job begin, complete, or fail. The default behavior is to not send any emails. This field is a block and is documented below.
environments Sequence[JobEnvironmentArgs]
existing_cluster_id str
format str
git_source JobGitSourceArgs
Specifies a Git repository for task source code. See git_source Configuration Block below.
health JobHealthArgs
An optional block that specifies the health conditions for the job documented below.
job_clusters Sequence[JobJobClusterArgs]
A list of job databricks.Cluster specifications that can be shared and reused by tasks of this job. Libraries cannot be declared in a shared job cluster. You must declare dependent libraries in task settings. Multi-task syntax
libraries Sequence[JobLibraryArgs]
(List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
max_concurrent_runs int
(Integer) An optional maximum allowed number of concurrent runs of the job. Defaults to 1.
max_retries int

Deprecated: should be used inside a task block and not inside a job block

min_retry_interval_millis int
(Integer) An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried.

Deprecated: should be used inside a task block and not inside a job block

name str
An optional name for the job. The default value is Untitled.
new_cluster JobNewClusterArgs
notebook_task JobNotebookTaskArgs

Deprecated: should be used inside a task block and not inside a job block

notification_settings JobNotificationSettingsArgs
An optional block controlling the notification settings on the job level documented below.
parameters Sequence[JobParameterArgs]
Specifies job parameters for the job. See parameter Configuration Block.
performance_target str
pipeline_task JobPipelineTaskArgs

Deprecated: should be used inside a task block and not inside a job block

python_wheel_task JobPythonWheelTaskArgs

Deprecated: should be used inside a task block and not inside a job block

queue JobQueueArgs
The queue status for the job. See queue Configuration Block below.
retry_on_timeout bool

Deprecated: should be used inside a task block and not inside a job block

run_as JobRunAsArgs
The user or the service principal the job runs as. See run_as Configuration Block below.
run_job_task JobRunJobTaskArgs

Deprecated: should be used inside a task block and not inside a job block

schedule JobScheduleArgs
An optional periodic schedule for this job. The default behavior is that the job runs when triggered by clicking Run Now in the Jobs UI or sending an API request to runNow. See schedule Configuration Block below.
spark_jar_task JobSparkJarTaskArgs

Deprecated: should be used inside a task block and not inside a job block

spark_python_task JobSparkPythonTaskArgs

Deprecated: should be used inside a task block and not inside a job block

spark_submit_task JobSparkSubmitTaskArgs

Deprecated: should be used inside a task block and not inside a job block

tags Mapping[str, str]
An optional map of the tags associated with the job. See tags Configuration Map
tasks Sequence[JobTaskArgs]
A list of task specifications that the job will execute. See task Configuration Block below.
timeout_seconds int
(Integer) An optional timeout applied to each run of this job. The default behavior is to have no timeout.
trigger JobTriggerArgs
The conditions that trigger the job to start. See trigger Configuration Block below.
webhook_notifications JobWebhookNotificationsArgs
(List) An optional set of system destinations (for example, webhook destinations or Slack) to be notified when runs of this job begin, complete, or fail. The default behavior is to not send any notifications. This field is a block and is documented below.
alwaysRunning Boolean
(Bool) Whether the job should always be running, like a Spark Streaming application: on every update, the provider restarts the current active run, or starts a new run if none is running. False by default. Job runs are started with the parameters specified in the spark_jar_task, spark_submit_task, spark_python_task, or notebook_task blocks.

Deprecated: always_running will be replaced by control_run_state in the next major release.

budgetPolicyId String
The ID of the user-specified budget policy to use for this job. If not specified, a default budget policy may be applied when creating or modifying the job.
continuous Property Map
Configuration block to configure pause status. See continuous Configuration Block.
controlRunState Boolean

(Bool) If true, the Databricks provider will stop and start the job as needed to ensure that the active run for the job reflects the deployed configuration. For continuous jobs, the provider respects the pause_status by stopping the current active run. This flag cannot be set for non-continuous jobs.

When migrating from always_running to control_run_state, set continuous as follows:

dbtTask Property Map

Deprecated: should be used inside a task block and not inside a job block

deployment Property Map
description String
An optional description for the job. The maximum length is 1024 characters in UTF-8 encoding.
editMode String
If "UI_LOCKED", the user interface for the job will be locked. If "EDITABLE" (the default), the user interface will be editable.
emailNotifications Property Map
(List) An optional set of email addresses notified when runs of this job begin, complete, or fail. The default behavior is to not send any emails. This field is a block and is documented below.
environments List<Property Map>
existingClusterId String
format String
gitSource Property Map
Specifies a Git repository for task source code. See git_source Configuration Block below.
health Property Map
An optional block that specifies the health conditions for the job documented below.
jobClusters List<Property Map>
A list of job databricks.Cluster specifications that can be shared and reused by tasks of this job. Libraries cannot be declared in a shared job cluster. You must declare dependent libraries in task settings. Multi-task syntax
libraries List<Property Map>
(List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
maxConcurrentRuns Number
(Integer) An optional maximum allowed number of concurrent runs of the job. Defaults to 1.
maxRetries Number

Deprecated: should be used inside a task block and not inside a job block

minRetryIntervalMillis Number
(Integer) An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried.

Deprecated: should be used inside a task block and not inside a job block

name String
An optional name for the job. The default value is Untitled.
newCluster Property Map
notebookTask Property Map

Deprecated: should be used inside a task block and not inside a job block

notificationSettings Property Map
An optional block controlling the notification settings on the job level documented below.
parameters List<Property Map>
Specifies job parameters for the job. See parameter Configuration Block.
performanceTarget String
pipelineTask Property Map

Deprecated: should be used inside a task block and not inside a job block

pythonWheelTask Property Map

Deprecated: should be used inside a task block and not inside a job block

queue Property Map
The queue status for the job. See queue Configuration Block below.
retryOnTimeout Boolean

Deprecated: should be used inside a task block and not inside a job block

runAs Property Map
The user or the service principal the job runs as. See run_as Configuration Block below.
runJobTask Property Map

Deprecated: should be used inside a task block and not inside a job block

schedule Property Map
An optional periodic schedule for this job. The default behavior is that the job runs when triggered by clicking Run Now in the Jobs UI or sending an API request to runNow. See schedule Configuration Block below.
sparkJarTask Property Map

Deprecated: should be used inside a task block and not inside a job block

sparkPythonTask Property Map

Deprecated: should be used inside a task block and not inside a job block

sparkSubmitTask Property Map

Deprecated: should be used inside a task block and not inside a job block

tags Map<String>
An optional map of the tags associated with the job. See tags Configuration Map
tasks List<Property Map>
A list of task specifications that the job will execute. See task Configuration Block below.
timeoutSeconds Number
(Integer) An optional timeout applied to each run of this job. The default behavior is to have no timeout.
trigger Property Map
The conditions that trigger the job to start. See trigger Configuration Block below.
webhookNotifications Property Map
(List) An optional set of system destinations (for example, webhook destinations or Slack) to be notified when runs of this job begin, complete, or fail. The default behavior is to not send any notifications. This field is a block and is documented below.
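To make the property reference above concrete, here is a short TypeScript sketch that sets a handful of the optional inputs (a cron schedule, a concurrency limit, tags, and failure e-mails). The cluster ID, notebook path, and e-mail address are placeholders.

import * as databricks from "@pulumi/databricks";

const nightly = new databricks.Job("nightly", {
    name: "Nightly ETL",
    maxConcurrentRuns: 1,
    tags: {
        team: "data-eng",
    },
    schedule: {
        quartzCronExpression: "0 0 2 * * ?", // every day at 02:00
        timezoneId: "UTC",
        pauseStatus: "UNPAUSED",
    },
    emailNotifications: {
        onFailures: ["oncall@example.com"], // placeholder address
    },
    tasks: [{
        taskKey: "etl",
        existingClusterId: "1234-567890-abcde123", // placeholder cluster ID
        notebookTask: {
            notebookPath: "/Production/NightlyEtl", // placeholder notebook path
        },
    }],
});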

Outputs

All input properties are implicitly available as output properties. Additionally, the Job resource produces the following output properties:

Id string
The provider-assigned unique ID for this managed resource.
Url string
URL of the job on the given workspace
Id string
The provider-assigned unique ID for this managed resource.
Url string
URL of the job on the given workspace
id String
The provider-assigned unique ID for this managed resource.
url String
URL of the job on the given workspace
id string
The provider-assigned unique ID for this managed resource.
url string
URL of the job on the given workspace
id str
The provider-assigned unique ID for this managed resource.
url str
URL of the job on the given workspace
id String
The provider-assigned unique ID for this managed resource.
url String
URL of the job on the given workspace
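These outputs behave like any other Pulumi output. For example, assuming a Job resource named nightly as in the sketch above, the workspace URL can be exported from the program:

export const jobUrl = nightly.url; // resolves once the job has been created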

Look up Existing Job Resource

Get an existing Job resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

public static get(name: string, id: Input<ID>, state?: JobState, opts?: CustomResourceOptions): Job
@staticmethod
def get(resource_name: str,
        id: str,
        opts: Optional[ResourceOptions] = None,
        always_running: Optional[bool] = None,
        budget_policy_id: Optional[str] = None,
        continuous: Optional[JobContinuousArgs] = None,
        control_run_state: Optional[bool] = None,
        dbt_task: Optional[JobDbtTaskArgs] = None,
        deployment: Optional[JobDeploymentArgs] = None,
        description: Optional[str] = None,
        edit_mode: Optional[str] = None,
        email_notifications: Optional[JobEmailNotificationsArgs] = None,
        environments: Optional[Sequence[JobEnvironmentArgs]] = None,
        existing_cluster_id: Optional[str] = None,
        format: Optional[str] = None,
        git_source: Optional[JobGitSourceArgs] = None,
        health: Optional[JobHealthArgs] = None,
        job_clusters: Optional[Sequence[JobJobClusterArgs]] = None,
        libraries: Optional[Sequence[JobLibraryArgs]] = None,
        max_concurrent_runs: Optional[int] = None,
        max_retries: Optional[int] = None,
        min_retry_interval_millis: Optional[int] = None,
        name: Optional[str] = None,
        new_cluster: Optional[JobNewClusterArgs] = None,
        notebook_task: Optional[JobNotebookTaskArgs] = None,
        notification_settings: Optional[JobNotificationSettingsArgs] = None,
        parameters: Optional[Sequence[JobParameterArgs]] = None,
        performance_target: Optional[str] = None,
        pipeline_task: Optional[JobPipelineTaskArgs] = None,
        python_wheel_task: Optional[JobPythonWheelTaskArgs] = None,
        queue: Optional[JobQueueArgs] = None,
        retry_on_timeout: Optional[bool] = None,
        run_as: Optional[JobRunAsArgs] = None,
        run_job_task: Optional[JobRunJobTaskArgs] = None,
        schedule: Optional[JobScheduleArgs] = None,
        spark_jar_task: Optional[JobSparkJarTaskArgs] = None,
        spark_python_task: Optional[JobSparkPythonTaskArgs] = None,
        spark_submit_task: Optional[JobSparkSubmitTaskArgs] = None,
        tags: Optional[Mapping[str, str]] = None,
        tasks: Optional[Sequence[JobTaskArgs]] = None,
        timeout_seconds: Optional[int] = None,
        trigger: Optional[JobTriggerArgs] = None,
        url: Optional[str] = None,
        webhook_notifications: Optional[JobWebhookNotificationsArgs] = None) -> Job
func GetJob(ctx *Context, name string, id IDInput, state *JobState, opts ...ResourceOption) (*Job, error)
public static Job Get(string name, Input<string> id, JobState? state, CustomResourceOptions? opts = null)
public static Job get(String name, Output<String> id, JobState state, CustomResourceOptions options)
resources:
  _:
    type: databricks:Job
    get:
      id: ${id}
name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
resource_name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to lookup.
name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
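For example, in TypeScript an existing job can be looked up by its job ID; the ID below is a placeholder.

import * as databricks from "@pulumi/databricks";

// Read-only lookup of an existing job by its ID (placeholder value).
const existing = databricks.Job.get("imported-job", "123456789");

export const existingJobUrl = existing.url;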
The following state arguments are supported:
AlwaysRunning bool
(Bool) Whether the job should always be running, like a Spark Streaming application: on every update, the provider restarts the current active run, or starts a new run if none is running. False by default. Job runs are started with the parameters specified in the spark_jar_task, spark_submit_task, spark_python_task, or notebook_task blocks.

Deprecated: always_running will be replaced by control_run_state in the next major release.

BudgetPolicyId string
The ID of the user-specified budget policy to use for this job. If not specified, a default budget policy may be applied when creating or modifying the job.
Continuous JobContinuous
Configuration block to configure pause status. See continuous Configuration Block.
ControlRunState bool

(Bool) If true, the Databricks provider will stop and start the job as needed to ensure that the active run for the job reflects the deployed configuration. For continuous jobs, the provider respects the pause_status by stopping the current active run. This flag cannot be set for non-continuous jobs.

When migrating from always_running to control_run_state, set continuous as follows:

DbtTask JobDbtTask

Deprecated: should be used inside a task block and not inside a job block

Deployment JobDeployment
Description string
An optional description for the job. The maximum length is 1024 characters in UTF-8 encoding.
EditMode string
If "UI_LOCKED", the user interface for the job will be locked. If "EDITABLE" (the default), the user interface will be editable.
EmailNotifications JobEmailNotifications
(List) An optional set of email addresses notified when runs of this job begin, complete, or fail. The default behavior is to not send any emails. This field is a block and is documented below.
Environments List<JobEnvironment>
ExistingClusterId string
Format string
GitSource JobGitSource
Specifies a Git repository for task source code. See git_source Configuration Block below.
Health JobHealth
An optional block that specifies the health conditions for the job documented below.
JobClusters List<JobJobCluster>
A list of job databricks.Cluster specifications that can be shared and reused by tasks of this job. Libraries cannot be declared in a shared job cluster. You must declare dependent libraries in task settings. Multi-task syntax
Libraries List<JobLibrary>
(List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
MaxConcurrentRuns int
(Integer) An optional maximum allowed number of concurrent runs of the job. Defaults to 1.
MaxRetries int

Deprecated: should be used inside a task block and not inside a job block

MinRetryIntervalMillis int
(Integer) An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried.

Deprecated: should be used inside a task block and not inside a job block

Name string
An optional name for the job. The default value is Untitled.
NewCluster JobNewCluster
NotebookTask JobNotebookTask

Deprecated: should be used inside a task block and not inside a job block

NotificationSettings JobNotificationSettings
An optional block controlling the notification settings on the job level documented below.
Parameters List<JobParameter>
Specifies job parameters for the job. See parameter Configuration Block.
PerformanceTarget string
PipelineTask JobPipelineTask

Deprecated: should be used inside a task block and not inside a job block

PythonWheelTask JobPythonWheelTask

Deprecated: should be used inside a task block and not inside a job block

Queue JobQueue
The queue status for the job. See queue Configuration Block below.
RetryOnTimeout bool

Deprecated: should be used inside a task block and not inside a job block

RunAs JobRunAs
The user or the service principal the job runs as. See run_as Configuration Block below.
RunJobTask JobRunJobTask

Deprecated: should be used inside a task block and not inside a job block

Schedule JobSchedule
An optional periodic schedule for this job. The default behavior is that the job runs when triggered by clicking Run Now in the Jobs UI or sending an API request to runNow. See schedule Configuration Block below.
SparkJarTask JobSparkJarTask

Deprecated: should be used inside a task block and not inside a job block

SparkPythonTask JobSparkPythonTask

Deprecated: should be used inside a task block and not inside a job block

SparkSubmitTask JobSparkSubmitTask

Deprecated: should be used inside a task block and not inside a job block

Tags Dictionary<string, string>
An optional map of the tags associated with the job. See tags Configuration Map
Tasks List<JobTask>
A list of task specifications that the job will execute. See task Configuration Block below.
TimeoutSeconds int
(Integer) An optional timeout applied to each run of this job. The default behavior is to have no timeout.
Trigger JobTrigger
The conditions that trigger the job to start. See trigger Configuration Block below.
Url string
URL of the job on the given workspace
WebhookNotifications JobWebhookNotifications
(List) An optional set of system destinations (for example, webhook destinations or Slack) to be notified when runs of this job begin, complete, or fail. The default behavior is to not send any notifications. This field is a block and is documented below.
AlwaysRunning bool
(Bool) Whether the job should always be running, like a Spark Streaming application: on every update, the provider restarts the current active run, or starts a new run if none is running. False by default. Job runs are started with the parameters specified in the spark_jar_task, spark_submit_task, spark_python_task, or notebook_task blocks.

Deprecated: always_running will be replaced by control_run_state in the next major release.

BudgetPolicyId string
The ID of the user-specified budget policy to use for this job. If not specified, a default budget policy may be applied when creating or modifying the job.
Continuous JobContinuousArgs
Configuration block to configure pause status. See continuous Configuration Block.
ControlRunState bool

(Bool) If true, the Databricks provider will stop and start the job as needed to ensure that the active run for the job reflects the deployed configuration. For continuous jobs, the provider respects the pause_status by stopping the current active run. This flag cannot be set for non-continuous jobs.

When migrating from always_running to control_run_state, set continuous as follows:

DbtTask JobDbtTaskArgs

Deprecated: should be used inside a task block and not inside a job block

Deployment JobDeploymentArgs
Description string
An optional description for the job. The maximum length is 1024 characters in UTF-8 encoding.
EditMode string
If "UI_LOCKED", the user interface for the job will be locked. If "EDITABLE" (the default), the user interface will be editable.
EmailNotifications JobEmailNotificationsArgs
(List) An optional set of email addresses notified when runs of this job begin, complete, or fail. The default behavior is to not send any emails. This field is a block and is documented below.
Environments []JobEnvironmentArgs
ExistingClusterId string
Format string
GitSource JobGitSourceArgs
Specifies a Git repository for task source code. See git_source Configuration Block below.
Health JobHealthArgs
An optional block that specifies the health conditions for the job documented below.
JobClusters []JobJobClusterArgs
A list of job databricks.Cluster specifications that can be shared and reused by tasks of this job. Libraries cannot be declared in a shared job cluster. You must declare dependent libraries in task settings. Multi-task syntax
Libraries []JobLibraryArgs
(List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
MaxConcurrentRuns int
(Integer) An optional maximum allowed number of concurrent runs of the job. Defaults to 1.
MaxRetries int

Deprecated: should be used inside a task block and not inside a job block

MinRetryIntervalMillis int
(Integer) An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried.

Deprecated: should be used inside a task block and not inside a job block

Name string
An optional name for the job. The default value is Untitled.
NewCluster JobNewClusterArgs
NotebookTask JobNotebookTaskArgs

Deprecated: should be used inside a task block and not inside a job block

NotificationSettings JobNotificationSettingsArgs
An optional block controlling the notification settings on the job level documented below.
Parameters []JobParameterArgs
Specifies job parameters for the job. See parameter Configuration Block.
PerformanceTarget string
PipelineTask JobPipelineTaskArgs

Deprecated: should be used inside a task block and not inside a job block

PythonWheelTask JobPythonWheelTaskArgs

Deprecated: should be used inside a task block and not inside a job block

Queue JobQueueArgs
The queue status for the job. See queue Configuration Block below.
RetryOnTimeout bool

Deprecated: should be used inside a task block and not inside a job block

RunAs JobRunAsArgs
The user or the service principal the job runs as. See run_as Configuration Block below.
RunJobTask JobRunJobTaskArgs

Deprecated: should be used inside a task block and not inside a job block

Schedule JobScheduleArgs
An optional periodic schedule for this job. The default behavior is that the job runs when triggered by clicking Run Now in the Jobs UI or sending an API request to runNow. See schedule Configuration Block below.
SparkJarTask JobSparkJarTaskArgs

Deprecated: should be used inside a task block and not inside a job block

SparkPythonTask JobSparkPythonTaskArgs

Deprecated: should be used inside a task block and not inside a job block

SparkSubmitTask JobSparkSubmitTaskArgs

Deprecated: should be used inside a task block and not inside a job block

Tags map[string]string
An optional map of the tags associated with the job. See tags Configuration Map
Tasks []JobTaskArgs
A list of task specifications that the job will execute. See task Configuration Block below.
TimeoutSeconds int
(Integer) An optional timeout applied to each run of this job. The default behavior is to have no timeout.
Trigger JobTriggerArgs
The conditions that trigger the job to start. See trigger Configuration Block below.
Url string
URL of the job on the given workspace
WebhookNotifications JobWebhookNotificationsArgs
(List) An optional set of system destinations (for example, webhook destinations or Slack) to be notified when runs of this job begin, complete, or fail. The default behavior is to not send any notifications. This field is a block and is documented below.
alwaysRunning Boolean
(Bool) Whether the job should always be running, like a Spark Streaming application: on every update, the provider restarts the current active run, or starts a new run if none is running. False by default. Job runs are started with the parameters specified in the spark_jar_task, spark_submit_task, spark_python_task, or notebook_task blocks.

Deprecated: always_running will be replaced by control_run_state in the next major release.

budgetPolicyId String
The ID of the user-specified budget policy to use for this job. If not specified, a default budget policy may be applied when creating or modifying the job.
continuous JobContinuous
Configuration block to configure pause status. See continuous Configuration Block.
controlRunState Boolean

(Bool) If true, the Databricks provider will stop and start the job as needed to ensure that the active run for the job reflects the deployed configuration. For continuous jobs, the provider respects the pause_status by stopping the current active run. This flag cannot be set for non-continuous jobs.

When migrating from always_running to control_run_state, set continuous as follows:

dbtTask JobDbtTask

Deprecated: should be used inside a task block and not inside a job block

deployment JobDeployment
description String
An optional description for the job. The maximum length is 1024 characters in UTF-8 encoding.
editMode String
If "UI_LOCKED", the user interface for the job will be locked. If "EDITABLE" (the default), the user interface will be editable.
emailNotifications JobEmailNotifications
(List) An optional set of email addresses notified when runs of this job begin, complete, or fail. The default behavior is to not send any emails. This field is a block and is documented below.
environments List<JobEnvironment>
existingClusterId String
format String
gitSource JobGitSource
Specifies a Git repository for task source code. See git_source Configuration Block below.
health JobHealth
An optional block that specifies the health conditions for the job documented below.
jobClusters List<JobJobCluster>
A list of job databricks.Cluster specifications that can be shared and reused by tasks of this job. Libraries cannot be declared in a shared job cluster. You must declare dependent libraries in task settings. Multi-task syntax
libraries List<JobLibrary>
(List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
maxConcurrentRuns Integer
(Integer) An optional maximum allowed number of concurrent runs of the job. Defaults to 1.
maxRetries Integer

Deprecated: should be used inside a task block and not inside a job block

minRetryIntervalMillis Integer
(Integer) An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried.

Deprecated: should be used inside a task block and not inside a job block

name String
An optional name for the job. The default value is Untitled.
newCluster JobNewCluster
notebookTask JobNotebookTask

Deprecated: should be used inside a task block and not inside a job block

notificationSettings JobNotificationSettings
An optional block controlling the notification settings on the job level documented below.
parameters List<JobParameter>
Specifies job parameters for the job. See parameter Configuration Block.
performanceTarget String
pipelineTask JobPipelineTask

Deprecated: should be used inside a task block and not inside a job block

pythonWheelTask JobPythonWheelTask

Deprecated: should be used inside a task block and not inside a job block

queue JobQueue
The queue status for the job. See queue Configuration Block below.
retryOnTimeout Boolean

Deprecated: should be used inside a task block and not inside a job block

runAs JobRunAs
The user or the service principal the job runs as. See run_as Configuration Block below.
runJobTask JobRunJobTask

Deprecated: should be used inside a task block and not inside a job block

schedule JobSchedule
An optional periodic schedule for this job. The default behavior is that the job runs when triggered by clicking Run Now in the Jobs UI or sending an API request to runNow. See schedule Configuration Block below.
sparkJarTask JobSparkJarTask

Deprecated: should be used inside a task block and not inside a job block

sparkPythonTask JobSparkPythonTask

Deprecated: should be used inside a task block and not inside a job block

sparkSubmitTask JobSparkSubmitTask

Deprecated: should be used inside a task block and not inside a job block

tags Map<String,String>
An optional map of the tags associated with the job. See tags Configuration Map
tasks List<JobTask>
A list of task specifications that the job will execute. See task Configuration Block below.
timeoutSeconds Integer
(Integer) An optional timeout applied to each run of this job. The default behavior is to have no timeout.
trigger JobTrigger
The conditions that trigger the job to start. See trigger Configuration Block below.
url String
URL of the job on the given workspace
webhookNotifications JobWebhookNotifications
(List) An optional set of system destinations (for example, webhook destinations or Slack) to be notified when runs of this job begin, complete, or fail. The default behavior is to not send any notifications. This field is a block and is documented below.
alwaysRunning boolean
(Bool) Whether the job should always be running, like a Spark Streaming application: on every update, the provider restarts the current active run, or starts a new run if none is running. False by default. Job runs are started with the parameters specified in the spark_jar_task, spark_submit_task, spark_python_task, or notebook_task blocks.

Deprecated: always_running will be replaced by control_run_state in the next major release.

budgetPolicyId string
The ID of the user-specified budget policy to use for this job. If not specified, a default budget policy may be applied when creating or modifying the job.
continuous JobContinuous
Configuration block to configure pause status. See continuous Configuration Block.
controlRunState boolean

(Bool) If true, the Databricks provider will stop and start the job as needed to ensure that the active run for the job reflects the deployed configuration. For continuous jobs, the provider respects the pause_status by stopping the current active run. This flag cannot be set for non-continuous jobs.

When migrating from always_running to control_run_state, set continuous as follows:

dbtTask JobDbtTask

Deprecated: should be used inside a task block and not inside a job block

deployment JobDeployment
description string
An optional description for the job. The maximum length is 1024 characters in UTF-8 encoding.
editMode string
If "UI_LOCKED", the user interface for the job will be locked. If "EDITABLE" (the default), the user interface will be editable.
emailNotifications JobEmailNotifications
(List) An optional set of email addresses notified when runs of this job begin, complete, or fail. The default behavior is to not send any emails. This field is a block and is documented below.
environments JobEnvironment[]
existingClusterId string
format string
gitSource JobGitSource
Specifies a Git repository for task source code. See git_source Configuration Block below.
health JobHealth
An optional block that specifies the health conditions for the job documented below.
jobClusters JobJobCluster[]
A list of job databricks.Cluster specifications that can be shared and reused by tasks of this job. Libraries cannot be declared in a shared job cluster. You must declare dependent libraries in task settings. Multi-task syntax
libraries JobLibrary[]
(List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
maxConcurrentRuns number
(Integer) An optional maximum allowed number of concurrent runs of the job. Defaults to 1.
maxRetries number

Deprecated: should be used inside a task block and not inside a job block

minRetryIntervalMillis number
(Integer) An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried.

Deprecated: should be used inside a task block and not inside a job block

name string
An optional name for the job. The default value is Untitled.
newCluster JobNewCluster
notebookTask JobNotebookTask

Deprecated: should be used inside a task block and not inside a job block

notificationSettings JobNotificationSettings
An optional block controlling the notification settings on the job level documented below.
parameters JobParameter[]
Specifies job parameters for the job. See parameter Configuration Block.
performanceTarget string
pipelineTask JobPipelineTask

Deprecated: should be used inside a task block and not inside a job block

pythonWheelTask JobPythonWheelTask

Deprecated: should be used inside a task block and not inside a job block

queue JobQueue
The queue status for the job. See queue Configuration Block below.
retryOnTimeout boolean

Deprecated: should be used inside a task block and not inside a job block

runAs JobRunAs
The user or the service principal the job runs as. See run_as Configuration Block below.
runJobTask JobRunJobTask

Deprecated: should be used inside a task block and not inside a job block

schedule JobSchedule
An optional periodic schedule for this job. The default behavior is that the job runs when triggered by clicking Run Now in the Jobs UI or sending an API request to runNow. See schedule Configuration Block below.
sparkJarTask JobSparkJarTask

Deprecated: should be used inside a task block and not inside a job block

sparkPythonTask JobSparkPythonTask

Deprecated: should be used inside a task block and not inside a job block

sparkSubmitTask JobSparkSubmitTask

Deprecated: should be used inside a task block and not inside a job block

tags {[key: string]: string}
An optional map of the tags associated with the job. See tags Configuration Map
tasks JobTask[]
A list of task specifications that the job will execute. See task Configuration Block below.
timeoutSeconds number
(Integer) An optional timeout applied to each run of this job. The default behavior is to have no timeout.
trigger JobTrigger
The conditions that trigger the job to start. See trigger Configuration Block below.
url string
URL of the job on the given workspace
webhookNotifications JobWebhookNotifications
(List) An optional set of system destinations (for example, webhook destinations or Slack) to be notified when runs of this job begin, complete, or fail. The default behavior is to not send any notifications. This field is a block and is documented below.
always_running bool
(Bool) Whether the job should always be running, like a Spark Streaming application: on every update, the provider restarts the current active run, or starts a new run if none is running. False by default. Job runs are started with the parameters specified in the spark_jar_task, spark_submit_task, spark_python_task, or notebook_task blocks.

Deprecated: always_running will be replaced by control_run_state in the next major release.

budget_policy_id str
The ID of the user-specified budget policy to use for this job. If not specified, a default budget policy may be applied when creating or modifying the job.
continuous JobContinuousArgs
Configuration block to configure pause status. See continuous Configuration Block.
control_run_state bool

(Bool) If true, the Databricks provider will stop and start the job as needed to ensure that the active run for the job reflects the deployed configuration. For continuous jobs, the provider respects the pause_status by stopping the current active run. This flag cannot be set for non-continuous jobs.

When migrating from always_running to control_run_state, set continuous as shown in the example following this property listing.

dbt_task JobDbtTaskArgs

Deprecated: should be used inside a task block and not inside a job block

deployment JobDeploymentArgs
description str
An optional description for the job. The maximum length is 1024 characters in UTF-8 encoding.
edit_mode str
If "UI_LOCKED", the user interface for the job will be locked. If "EDITABLE" (the default), the user interface will be editable.
email_notifications JobEmailNotificationsArgs
(List) An optional set of email addresses notified when runs of this job begin, complete, or fail. The default behavior is to not send any emails. This field is a block and is documented below.
environments Sequence[JobEnvironmentArgs]
existing_cluster_id str
format str
git_source JobGitSourceArgs
Specifies the Git repository for task source code. See git_source Configuration Block below.
health JobHealthArgs
An optional block that specifies the health conditions for the job documented below.
job_clusters Sequence[JobJobClusterArgs]
A list of job databricks.Cluster specifications that can be shared and reused by tasks of this job. Libraries cannot be declared in a shared job cluster. You must declare dependent libraries in task settings. Multi-task syntax
libraries Sequence[JobLibraryArgs]
(List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
max_concurrent_runs int
(Integer) An optional maximum allowed number of concurrent runs of the job. Defaults to 1.
max_retries int

Deprecated: should be used inside a task block and not inside a job block

min_retry_interval_millis int
(Integer) An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried.

Deprecated: should be used inside a task block and not inside a job block

name str
An optional name for the job. The default value is Untitled.
new_cluster JobNewClusterArgs
notebook_task JobNotebookTaskArgs

Deprecated: should be used inside a task block and not inside a job block

notification_settings JobNotificationSettingsArgs
An optional block controlling the notification settings on the job level documented below.
parameters Sequence[JobParameterArgs]
Specifies job parameters for the job. See parameter Configuration Block
performance_target str
pipeline_task JobPipelineTaskArgs

Deprecated: should be used inside a task block and not inside a job block

python_wheel_task JobPythonWheelTaskArgs

Deprecated: should be used inside a task block and not inside a job block

queue JobQueueArgs
The queue status for the job. See queue Configuration Block below.
retry_on_timeout bool

Deprecated: should be used inside a task block and not inside a job block

run_as JobRunAsArgs
The user or the service principal the job runs as. See run_as Configuration Block below.
run_job_task JobRunJobTaskArgs

Deprecated: should be used inside a task block and not inside a job block

schedule JobScheduleArgs
An optional periodic schedule for this job. The default behavior is that the job runs when triggered by clicking Run Now in the Jobs UI or sending an API request to runNow. See schedule Configuration Block below.
spark_jar_task JobSparkJarTaskArgs

Deprecated: should be used inside a task block and not inside a job block

spark_python_task JobSparkPythonTaskArgs

Deprecated: should be used inside a task block and not inside a job block

spark_submit_task JobSparkSubmitTaskArgs

Deprecated: should be used inside a task block and not inside a job block

tags Mapping[str, str]
An optional map of the tags associated with the job. See tags Configuration Map
tasks Sequence[JobTaskArgs]
A list of task specifications that the job will execute. See task Configuration Block below.
timeout_seconds int
(Integer) An optional timeout applied to each run of this job. The default behavior is to have no timeout.
trigger JobTriggerArgs
The conditions that trigger the job to start. See trigger Configuration Block below.
url str
URL of the job on the given workspace
webhook_notifications JobWebhookNotificationsArgs
(List) An optional set of system destinations (for example, webhook destinations or Slack) to be notified when runs of this job begin, complete, or fail. The default behavior is to not send any notifications. This field is a block and is documented below.
alwaysRunning Boolean
(Bool) Whether the job should always be running, like a Spark Streaming application. On every update, the current active run is restarted, or started if it is not running. False by default. Any job runs are started with parameters specified in spark_jar_task or spark_submit_task or spark_python_task or notebook_task blocks.

Deprecated: always_running will be replaced by control_run_state in the next major release.

budgetPolicyId String
The ID of the user-specified budget policy to use for this job. If not specified, a default budget policy may be applied when creating or modifying the job.
continuous Property Map
Configuration block to configure pause status. See continuous Configuration Block.
controlRunState Boolean

(Bool) If true, the Databricks provider will stop and start the job as needed to ensure that the active run for the job reflects the deployed configuration. For continuous jobs, the provider respects the pause_status by stopping the current active run. This flag cannot be set for non-continuous jobs.

When migrating from always_running to control_run_state, set continuous as shown in the example following this property listing.

dbtTask Property Map

Deprecated: should be used inside a task block and not inside a job block

deployment Property Map
description String
An optional description for the job. The maximum length is 1024 characters in UTF-8 encoding.
editMode String
If "UI_LOCKED", the user interface for the job will be locked. If "EDITABLE" (the default), the user interface will be editable.
emailNotifications Property Map
(List) An optional set of email addresses notified when runs of this job begin, complete, or fail. The default behavior is to not send any emails. This field is a block and is documented below.
environments List<Property Map>
existingClusterId String
format String
gitSource Property Map
Specifies the Git repository for task source code. See git_source Configuration Block below.
health Property Map
An optional block that specifies the health conditions for the job documented below.
jobClusters List<Property Map>
A list of job databricks.Cluster specifications that can be shared and reused by tasks of this job. Libraries cannot be declared in a shared job cluster. You must declare dependent libraries in task settings. Multi-task syntax
libraries List<Property Map>
(List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
maxConcurrentRuns Number
(Integer) An optional maximum allowed number of concurrent runs of the job. Defaults to 1.
maxRetries Number

Deprecated: should be used inside a task block and not inside a job block

minRetryIntervalMillis Number
(Integer) An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried.

Deprecated: should be used inside a task block and not inside a job block

name String
An optional name for the job. The default value is Untitled.
newCluster Property Map
notebookTask Property Map

Deprecated: should be used inside a task block and not inside a job block

notificationSettings Property Map
An optional block controlling the notification settings on the job level documented below.
parameters List<Property Map>
Specifies job parameters for the job. See parameter Configuration Block
performanceTarget String
pipelineTask Property Map

Deprecated: should be used inside a task block and not inside a job block

pythonWheelTask Property Map

Deprecated: should be used inside a task block and not inside a job block

queue Property Map
The queue status for the job. See queue Configuration Block below.
retryOnTimeout Boolean

Deprecated: should be used inside a task block and not inside a job block

runAs Property Map
The user or the service principal the job runs as. See run_as Configuration Block below.
runJobTask Property Map

Deprecated: should be used inside a task block and not inside a job block

schedule Property Map
An optional periodic schedule for this job. The default behavior is that the job runs when triggered by clicking Run Now in the Jobs UI or sending an API request to runNow. See schedule Configuration Block below.
sparkJarTask Property Map

Deprecated: should be used inside a task block and not inside a job block

sparkPythonTask Property Map

Deprecated: should be used inside a task block and not inside a job block

sparkSubmitTask Property Map

Deprecated: should be used inside a task block and not inside a job block

tags Map<String>
An optional map of the tags associated with the job. See tags Configuration Map
tasks List<Property Map>
A list of task specifications that the job will execute. See task Configuration Block below.
timeoutSeconds Number
(Integer) An optional timeout applied to each run of this job. The default behavior is to have no timeout.
trigger Property Map
The conditions that trigger the job to start. See trigger Configuration Block below.
url String
URL of the job on the given workspace
webhookNotifications Property Map
(List) An optional set of system destinations (for example, webhook destinations or Slack) to be notified when runs of this job begin, complete, or fail. The default behavior is to not send any notifications. This field is a block and is documented below.
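As a minimal sketch of the always_running to control_run_state migration referenced above (the cluster ID and notebook path are hypothetical placeholders), a continuous job can be declared as:

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

// Sketch: the provider keeps the active run in sync with the deployed
// configuration; the unpaused continuous block replaces always_running.
const streaming = new databricks.Job("streaming", {
    name: "Continuous streaming job",
    controlRunState: true,
    continuous: {
        pauseStatus: "UNPAUSED",
    },
    tasks: [{
        taskKey: "stream",
        existingClusterId: "0000-000000-abcdefgh", // hypothetical cluster ID
        notebookTask: {
            notebookPath: "/Shared/streaming", // hypothetical notebook path
        },
    }],
});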

Supporting Types

JobContinuous
, JobContinuousArgs

PauseStatus string
Indicates whether this continuous job is paused or not. Either PAUSED or UNPAUSED. When the pause_status field is omitted in the block, the server will default to using UNPAUSED as a value for pause_status.
PauseStatus string
Indicates whether this continuous job is paused or not. Either PAUSED or UNPAUSED. When the pause_status field is omitted in the block, the server will default to using UNPAUSED as a value for pause_status.
pauseStatus String
Indicates whether this continuous job is paused or not. Either PAUSED or UNPAUSED. When the pause_status field is omitted in the block, the server will default to using UNPAUSED as a value for pause_status.
pauseStatus string
Indicates whether this continuous job is paused or not. Either PAUSED or UNPAUSED. When the pause_status field is omitted in the block, the server will default to using UNPAUSED as a value for pause_status.
pause_status str
Indicates whether this continuous job is paused or not. Either PAUSED or UNPAUSED. When the pause_status field is omitted in the block, the server will default to using UNPAUSED as a value for pause_status.
pauseStatus String
Indicates whether this continuous job is paused or not. Either PAUSED or UNPAUSED. When the pause_status field is omitted in the block, the server will default to using UNPAUSED as a value for pause_status.

JobDbtTask
, JobDbtTaskArgs

Commands This property is required. List<string>
(Array) Series of dbt commands to execute in sequence. Every command must start with "dbt".
Catalog string
The name of the catalog to use inside Unity Catalog.
ProfilesDirectory string
The relative path to the directory in the repository specified by git_source where dbt should look for the profiles.yml file. If not specified, defaults to the repository's root directory. Equivalent to passing --profiles-dir to a dbt command.
ProjectDirectory string
The path where dbt should look for dbt_project.yml. Equivalent to passing --project-dir to the dbt CLI.

  • If source is GIT: Relative path to the directory in the repository specified in the git_source block. Defaults to the repository's root directory when not specified.
  • If source is WORKSPACE: Absolute path to the folder in the workspace.
Schema string
The name of the schema dbt should run in. Defaults to default.
Source string
The source of the project. Possible values are WORKSPACE and GIT. Defaults to GIT if a git_source block is present in the job definition.
WarehouseId string

The ID of the SQL warehouse that dbt should execute against.

You also need to include a git_source block to configure the repository that contains the dbt project.

Commands This property is required. []string
(Array) Series of dbt commands to execute in sequence. Every command must start with "dbt".
Catalog string
The name of the catalog to use inside Unity Catalog.
ProfilesDirectory string
The relative path to the directory in the repository specified by git_source where dbt should look for the profiles.yml file. If not specified, defaults to the repository's root directory. Equivalent to passing --profiles-dir to a dbt command.
ProjectDirectory string
The path where dbt should look for dbt_project.yml. Equivalent to passing --project-dir to the dbt CLI.

  • If source is GIT: Relative path to the directory in the repository specified in the git_source block. Defaults to the repository's root directory when not specified.
  • If source is WORKSPACE: Absolute path to the folder in the workspace.
Schema string
The name of the schema dbt should run in. Defaults to default.
Source string
The source of the project. Possible values are WORKSPACE and GIT. Defaults to GIT if a git_source block is present in the job definition.
WarehouseId string

The ID of the SQL warehouse that dbt should execute against.

You also need to include a git_source block to configure the repository that contains the dbt project.

commands This property is required. List<String>
(Array) Series of dbt commands to execute in sequence. Every command must start with "dbt".
catalog String
The name of the catalog to use inside Unity Catalog.
profilesDirectory String
The relative path to the directory in the repository specified by git_source where dbt should look for the profiles.yml file. If not specified, defaults to the repository's root directory. Equivalent to passing --profiles-dir to a dbt command.
projectDirectory String
The path where dbt should look for dbt_project.yml. Equivalent to passing --project-dir to the dbt CLI.

  • If source is GIT: Relative path to the directory in the repository specified in the git_source block. Defaults to the repository's root directory when not specified.
  • If source is WORKSPACE: Absolute path to the folder in the workspace.
schema String
The name of the schema dbt should run in. Defaults to default.
source String
The source of the project. Possible values are WORKSPACE and GIT. Defaults to GIT if a git_source block is present in the job definition.
warehouseId String

The ID of the SQL warehouse that dbt should execute against.

You also need to include a git_source block to configure the repository that contains the dbt project.

commands This property is required. string[]
(Array) Series of dbt commands to execute in sequence. Every command must start with "dbt".
catalog string
The name of the catalog to use inside Unity Catalog.
profilesDirectory string
The relative path to the directory in the repository specified by git_source where dbt should look for the profiles.yml file. If not specified, defaults to the repository's root directory. Equivalent to passing --profiles-dir to a dbt command.
projectDirectory string
The path where dbt should look for dbt_project.yml. Equivalent to passing --project-dir to the dbt CLI.

  • If source is GIT: Relative path to the directory in the repository specified in the git_source block. Defaults to the repository's root directory when not specified.
  • If source is WORKSPACE: Absolute path to the folder in the workspace.
schema string
The name of the schema dbt should run in. Defaults to default.
source string
The source of the project. Possible values are WORKSPACE and GIT. Defaults to GIT if a git_source block is present in the job definition.
warehouseId string

The ID of the SQL warehouse that dbt should execute against.

You also need to include a git_source block to configure the repository that contains the dbt project.

commands This property is required. Sequence[str]
(Array) Series of dbt commands to execute in sequence. Every command must start with "dbt".
catalog str
The name of the catalog to use inside Unity Catalog.
profiles_directory str
The relative path to the directory in the repository specified by git_source where dbt should look for the profiles.yml file. If not specified, defaults to the repository's root directory. Equivalent to passing --profiles-dir to a dbt command.
project_directory str
The path where dbt should look for dbt_project.yml. Equivalent to passing --project-dir to the dbt CLI.

  • If source is GIT: Relative path to the directory in the repository specified in the git_source block. Defaults to the repository's root directory when not specified.
  • If source is WORKSPACE: Absolute path to the folder in the workspace.
schema str
The name of the schema dbt should run in. Defaults to default.
source str
The source of the project. Possible values are WORKSPACE and GIT. Defaults to GIT if a git_source block is present in the job definition.
warehouse_id str

The ID of the SQL warehouse that dbt should execute against.

You also need to include a git_source block to configure the repository that contains the dbt project.

commands This property is required. List<String>
(Array) Series of dbt commands to execute in sequence. Every command must start with "dbt".
catalog String
The name of the catalog to use inside Unity Catalog.
profilesDirectory String
The relative path to the directory in the repository specified by git_source where dbt should look for the profiles.yml file. If not specified, defaults to the repository's root directory. Equivalent to passing --profiles-dir to a dbt command.
projectDirectory String
The path where dbt should look for dbt_project.yml. Equivalent to passing --project-dir to the dbt CLI.

  • If source is GIT: Relative path to the directory in the repository specified in the git_source block. Defaults to the repository's root directory when not specified.
  • If source is WORKSPACE: Absolute path to the folder in the workspace.
schema String
The name of the schema dbt should run in. Defaults to default.
source String
The source of the project. Possible values are WORKSPACE and GIT. Defaults to GIT if a git_source block is present in the job definition.
warehouseId String

The ID of the SQL warehouse that dbt should execute against.

You also need to include a git_source block to configure the repository that contains the dbt project.
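A minimal sketch of a dbt task, assuming a hypothetical repository URL and SQL warehouse ID; per the note above, a git_source block is included so the task can fetch the dbt project:

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

const dbt = new databricks.Job("dbt", {
    name: "dbt job",
    gitSource: {
        url: "https://github.com/example/dbt-project", // hypothetical repository
        provider: "gitHub",
        branch: "main",
    },
    tasks: [{
        taskKey: "dbt",
        dbtTask: {
            commands: [
                "dbt deps",
                "dbt seed",
                "dbt run",
            ],
            warehouseId: "1234567890abcdef", // hypothetical SQL warehouse ID
            schema: "analytics",
        },
        // A cluster (or serverless environment) for the task is configured here.
    }],
});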

JobDeployment
, JobDeploymentArgs

Kind This property is required. string
MetadataFilePath string
Kind This property is required. string
MetadataFilePath string
kind This property is required. String
metadataFilePath String
kind This property is required. string
metadataFilePath string
kind This property is required. str
metadata_file_path str
kind This property is required. String
metadataFilePath String

JobEmailNotifications
, JobEmailNotificationsArgs

NoAlertForSkippedRuns bool
(Bool) don't send alerts for skipped runs. (It's recommended to use the corresponding setting in the notification_settings configuration block).
OnDurationWarningThresholdExceededs List<string>

(List) list of emails to notify when the duration of a run exceeds the threshold specified by the RUN_DURATION_SECONDS metric in the health block.

The following parameter is only available for the job level configuration.

OnFailures List<string>
(List) list of emails to notify when the run fails.
OnStarts List<string>
(List) list of emails to notify when the run starts.
OnStreamingBacklogExceededs List<string>
OnSuccesses List<string>
(List) list of emails to notify when the run completes successfully.
NoAlertForSkippedRuns bool
(Bool) don't send alerts for skipped runs. (It's recommended to use the corresponding setting in the notification_settings configuration block).
OnDurationWarningThresholdExceededs []string

(List) list of emails to notify when the duration of a run exceeds the threshold specified by the RUN_DURATION_SECONDS metric in the health block.

The following parameter is only available for the job level configuration.

OnFailures []string
(List) list of emails to notify when the run fails.
OnStarts []string
(List) list of emails to notify when the run starts.
OnStreamingBacklogExceededs []string
OnSuccesses []string
(List) list of emails to notify when the run completes successfully.
noAlertForSkippedRuns Boolean
(Bool) don't send alerts for skipped runs. (It's recommended to use the corresponding setting in the notification_settings configuration block).
onDurationWarningThresholdExceededs List<String>

(List) list of emails to notify when the duration of a run exceeds the threshold specified by the RUN_DURATION_SECONDS metric in the health block.

The following parameter is only available for the job level configuration.

onFailures List<String>
(List) list of emails to notify when the run fails.
onStarts List<String>
(List) list of emails to notify when the run starts.
onStreamingBacklogExceededs List<String>
onSuccesses List<String>
(List) list of emails to notify when the run completes successfully.
noAlertForSkippedRuns boolean
(Bool) don't send alerts for skipped runs. (It's recommended to use the corresponding setting in the notification_settings configuration block).
onDurationWarningThresholdExceededs string[]

(List) list of emails to notify when the duration of a run exceeds the threshold specified by the RUN_DURATION_SECONDS metric in the health block.

The following parameter is only available for the job level configuration.

onFailures string[]
(List) list of emails to notify when the run fails.
onStarts string[]
(List) list of emails to notify when the run starts.
onStreamingBacklogExceededs string[]
onSuccesses string[]
(List) list of emails to notify when the run completes successfully.
no_alert_for_skipped_runs bool
(Bool) don't send alerts for skipped runs. (It's recommended to use the corresponding setting in the notification_settings configuration block).
on_duration_warning_threshold_exceededs Sequence[str]

(List) list of emails to notify when the duration of a run exceeds the threshold specified by the RUN_DURATION_SECONDS metric in the health block.

The following parameter is only available for the job level configuration.

on_failures Sequence[str]
(List) list of emails to notify when the run fails.
on_starts Sequence[str]
(List) list of emails to notify when the run starts.
on_streaming_backlog_exceededs Sequence[str]
on_successes Sequence[str]
(List) list of emails to notify when the run completes successfully.
noAlertForSkippedRuns Boolean
(Bool) don't send alerts for skipped runs. (It's recommended to use the corresponding setting in the notification_settings configuration block).
onDurationWarningThresholdExceededs List<String>

(List) list of emails to notify when the duration of a run exceeds the threshold specified by the RUN_DURATION_SECONDS metric in the health block.

The following parameter is only available for the job level configuration.

onFailures List<String>
(List) list of emails to notify when the run fails.
onStarts List<String>
(List) list of emails to notify when the run starts.
onStreamingBacklogExceededs List<String>
onSuccesses List<String>
(List) list of emails to notify when the run completes successfully.
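A minimal sketch of job-level email notifications (the addresses are hypothetical):

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

const notified = new databricks.Job("notified", {
    name: "Job with email notifications",
    emailNotifications: {
        onFailures: ["oncall@example.com"],       // hypothetical address
        onSuccesses: ["data-team@example.com"],   // hypothetical address
        noAlertForSkippedRuns: true,
    },
    // tasks omitted for brevity
});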

JobEnvironment
, JobEnvironmentArgs

EnvironmentKey This property is required. string
a unique identifier of the Environment. It will be referenced from the environment_key attribute of the corresponding task.
Spec JobEnvironmentSpec
block describing the Environment. Consists of the following attributes:
EnvironmentKey This property is required. string
a unique identifier of the Environment. It will be referenced from the environment_key attribute of the corresponding task.
Spec JobEnvironmentSpec
block describing the Environment. Consists of the following attributes:
environmentKey This property is required. String
a unique identifier of the Environment. It will be referenced from the environment_key attribute of the corresponding task.
spec JobEnvironmentSpec
block describing the Environment. Consists of the following attributes:
environmentKey This property is required. string
a unique identifier of the Environment. It will be referenced from the environment_key attribute of the corresponding task.
spec JobEnvironmentSpec
block describing the Environment. Consists of the following attributes:
environment_key This property is required. str
a unique identifier of the Environment. It will be referenced from the environment_key attribute of the corresponding task.
spec JobEnvironmentSpec
block describing the Environment. Consists of the following attributes:
environmentKey This property is required. String
a unique identifier of the Environment. It will be referenced from the environment_key attribute of the corresponding task.
spec Property Map
block describing the Environment. Consists of the following attributes:

JobEnvironmentSpec
, JobEnvironmentSpecArgs

Client This property is required. string
client version used by the environment.
Dependencies List<string>
List of pip dependencies, as supported by the version of pip in this environment. Each dependency is a pip requirement file line. See API docs for more information.
Client This property is required. string
client version used by the environment.
Dependencies []string
List of pip dependencies, as supported by the version of pip in this environment. Each dependency is a pip requirement file line. See API docs for more information.
client This property is required. String
client version used by the environment.
dependencies List<String>
List of pip dependencies, as supported by the version of pip in this environment. Each dependency is a pip requirement file line. See API docs for more information.
client This property is required. string
client version used by the environment.
dependencies string[]
List of pip dependencies, as supported by the version of pip in this environment. Each dependency is a pip requirement file line. See API docs for more information.
client This property is required. str
client version used by the environment.
dependencies Sequence[str]
List of pip dependencies, as supported by the version of pip in this environment. Each dependency is a pip requirement file line. See API docs for more information.
client This property is required. String
client version used by the environment.
dependencies List<String>
List of pip dependencies, as supported by the version of pip in this environment. Each dependency is a pip requirement file line. See API docs for more information.
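A minimal sketch of a shared environment referenced by a task through its environment_key (the script path and dependency pin are hypothetical):

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

const serverless = new databricks.Job("serverless", {
    name: "Job with a shared environment",
    environments: [{
        environmentKey: "default",
        spec: {
            client: "1",                          // client version used by the environment
            dependencies: ["requests==2.32.*"],   // pip requirement file lines
        },
    }],
    tasks: [{
        taskKey: "etl",
        environmentKey: "default", // references the environment declared above
        sparkPythonTask: {
            pythonFile: "/Workspace/Shared/etl.py", // hypothetical workspace path
        },
    }],
});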

JobGitSource
, JobGitSourceArgs

Url This property is required. string
URL of the Git repository to use.
Branch string
name of the Git branch to use. Conflicts with tag and commit.
Commit string
hash of Git commit to use. Conflicts with branch and tag.
GitSnapshot JobGitSourceGitSnapshot
JobSource JobGitSourceJobSource
Provider string
case-insensitive name of the Git provider. The following values are currently supported (this list may change; consult the Repos API documentation): gitHub, gitHubEnterprise, bitbucketCloud, bitbucketServer, azureDevOpsServices, gitLab, gitLabEnterpriseEdition.
Tag string
name of the Git tag to use. Conflicts with branch and commit.
Url This property is required. string
URL of the Git repository to use.
Branch string
name of the Git branch to use. Conflicts with tag and commit.
Commit string
hash of Git commit to use. Conflicts with branch and tag.
GitSnapshot JobGitSourceGitSnapshot
JobSource JobGitSourceJobSource
Provider string
case-insensitive name of the Git provider. The following values are currently supported (this list may change; consult the Repos API documentation): gitHub, gitHubEnterprise, bitbucketCloud, bitbucketServer, azureDevOpsServices, gitLab, gitLabEnterpriseEdition.
Tag string
name of the Git tag to use. Conflicts with branch and commit.
url This property is required. String
URL of the Git repository to use.
branch String
name of the Git branch to use. Conflicts with tag and commit.
commit String
hash of Git commit to use. Conflicts with branch and tag.
gitSnapshot JobGitSourceGitSnapshot
jobSource JobGitSourceJobSource
provider String
case-insensitive name of the Git provider. The following values are currently supported (this list may change; consult the Repos API documentation): gitHub, gitHubEnterprise, bitbucketCloud, bitbucketServer, azureDevOpsServices, gitLab, gitLabEnterpriseEdition.
tag String
name of the Git tag to use. Conflicts with branch and commit.
url This property is required. string
URL of the Git repository to use.
branch string
name of the Git branch to use. Conflicts with tag and commit.
commit string
hash of Git commit to use. Conflicts with branch and tag.
gitSnapshot JobGitSourceGitSnapshot
jobSource JobGitSourceJobSource
provider string
case-insensitive name of the Git provider. The following values are currently supported (this list may change; consult the Repos API documentation): gitHub, gitHubEnterprise, bitbucketCloud, bitbucketServer, azureDevOpsServices, gitLab, gitLabEnterpriseEdition.
tag string
name of the Git tag to use. Conflicts with branch and commit.
url This property is required. str
URL of the Git repository to use.
branch str
name of the Git branch to use. Conflicts with tag and commit.
commit str
hash of Git commit to use. Conflicts with branch and tag.
git_snapshot JobGitSourceGitSnapshot
job_source JobGitSourceJobSource
provider str
case-insensitive name of the Git provider. The following values are currently supported (this list may change; consult the Repos API documentation): gitHub, gitHubEnterprise, bitbucketCloud, bitbucketServer, azureDevOpsServices, gitLab, gitLabEnterpriseEdition.
tag str
name of the Git tag to use. Conflicts with branch and commit.
url This property is required. String
URL of the Git repository to use.
branch String
name of the Git branch to use. Conflicts with tag and commit.
commit String
hash of Git commit to use. Conflicts with branch and tag.
gitSnapshot Property Map
jobSource Property Map
provider String
case-insensitive name of the Git provider. The following values are currently supported (this list may change; consult the Repos API documentation): gitHub, gitHubEnterprise, bitbucketCloud, bitbucketServer, azureDevOpsServices, gitLab, gitLabEnterpriseEdition.
tag String
name of the Git tag to use. Conflicts with branch and commit.
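A minimal sketch of a Git-backed job pinned to a tag (the repository URL, paths, and cluster ID are hypothetical); exactly one of branch, tag, or commit should be set:

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

const gitBacked = new databricks.Job("git-backed", {
    name: "Job sourced from Git",
    gitSource: {
        url: "https://github.com/example/notebooks", // hypothetical repository
        provider: "gitHub",
        tag: "v1.2.3",
    },
    tasks: [{
        taskKey: "report",
        existingClusterId: "0000-000000-abcdefgh", // hypothetical cluster ID
        notebookTask: {
            // With git_source set, the path is resolved relative to the repository root.
            notebookPath: "notebooks/report",
        },
    }],
});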

JobGitSourceGitSnapshot
, JobGitSourceGitSnapshotArgs

UsedCommit string
UsedCommit string
usedCommit String
usedCommit string
usedCommit String

JobGitSourceJobSource
, JobGitSourceJobSourceArgs

ImportFromGitBranch This property is required. string
JobConfigPath This property is required. string
DirtyState string
ImportFromGitBranch This property is required. string
JobConfigPath This property is required. string
DirtyState string
importFromGitBranch This property is required. String
jobConfigPath This property is required. String
dirtyState String
importFromGitBranch This property is required. string
jobConfigPath This property is required. string
dirtyState string
import_from_git_branch This property is required. str
job_config_path This property is required. str
dirty_state str
importFromGitBranch This property is required. String
jobConfigPath This property is required. String
dirtyState String

JobHealth
, JobHealthArgs

Rules This property is required. List<JobHealthRule>
list of rules that are represented as objects with the following attributes:
Rules This property is required. []JobHealthRule
list of rules that are represented as objects with the following attributes:
rules This property is required. List<JobHealthRule>
list of rules that are represented as objects with the following attributes:
rules This property is required. JobHealthRule[]
list of rules that are represented as objects with the following attributes:
rules This property is required. Sequence[JobHealthRule]
list of rules that are represented as objects with the following attributes:
rules This property is required. List<Property Map>
list of rules that are represented as objects with the following attributes:

JobHealthRule
, JobHealthRuleArgs

Metric This property is required. string
string specifying the metric to check. The only supported metric is RUN_DURATION_SECONDS (check Jobs REST API documentation for the latest information).
Op This property is required. string
string specifying the operation used to evaluate the given metric. The only supported operation is GREATER_THAN.
Value This property is required. int
integer value used to compare to the given metric.
Metric This property is required. string
string specifying the metric to check. The only supported metric is RUN_DURATION_SECONDS (check Jobs REST API documentation for the latest information).
Op This property is required. string
string specifying the operation used to evaluate the given metric. The only supported operation is GREATER_THAN.
Value This property is required. int
integer value used to compare to the given metric.
metric This property is required. String
string specifying the metric to check. The only supported metric is RUN_DURATION_SECONDS (check Jobs REST API documentation for the latest information).
op This property is required. String
string specifying the operation used to evaluate the given metric. The only supported operation is GREATER_THAN.
value This property is required. Integer
integer value used to compare to the given metric.
metric This property is required. string
string specifying the metric to check. The only supported metric is RUN_DURATION_SECONDS (check Jobs REST API documentation for the latest information).
op This property is required. string
string specifying the operation used to evaluate the given metric. The only supported operation is GREATER_THAN.
value This property is required. number
integer value used to compare to the given metric.
metric This property is required. str
string specifying the metric to check. The only supported metric is RUN_DURATION_SECONDS (check Jobs REST API documentation for the latest information).
op This property is required. str
string specifying the operation used to evaluate the given metric. The only supported operation is GREATER_THAN.
value This property is required. int
integer value used to compare to the given metric.
metric This property is required. String
string specifying the metric to check. The only supported metric is RUN_DURATION_SECONDS (check Jobs REST API documentation for the latest information).
op This property is required. String
string specifying the operation used to evaluate the given metric. The only supported operation is GREATER_THAN.
value This property is required. Number
integer value used to compare to the given metric.
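A minimal sketch of a health rule combined with the matching duration-warning email notification (the address is hypothetical):

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

const monitored = new databricks.Job("monitored", {
    name: "Job with a run-duration health rule",
    health: {
        rules: [{
            metric: "RUN_DURATION_SECONDS",
            op: "GREATER_THAN",
            value: 3600, // warn when a run exceeds one hour
        }],
    },
    emailNotifications: {
        onDurationWarningThresholdExceededs: ["oncall@example.com"], // hypothetical address
    },
    // tasks omitted for brevity
});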

JobJobCluster
, JobJobClusterArgs

JobClusterKey This property is required. string
Identifier that can be referenced in a task block, so that the cluster is shared between tasks
NewCluster This property is required. JobJobClusterNewCluster
Block with almost the same set of parameters as the databricks.Cluster resource, except the following (check the REST API documentation for the full list of supported parameters):
JobClusterKey This property is required. string
Identifier that can be referenced in a task block, so that the cluster is shared between tasks
NewCluster This property is required. JobJobClusterNewCluster
Block with almost the same set of parameters as the databricks.Cluster resource, except the following (check the REST API documentation for the full list of supported parameters):
jobClusterKey This property is required. String
Identifier that can be referenced in a task block, so that the cluster is shared between tasks
newCluster This property is required. JobJobClusterNewCluster
Block with almost the same set of parameters as the databricks.Cluster resource, except the following (check the REST API documentation for the full list of supported parameters):
jobClusterKey This property is required. string
Identifier that can be referenced in a task block, so that the cluster is shared between tasks
newCluster This property is required. JobJobClusterNewCluster
Block with almost the same set of parameters as the databricks.Cluster resource, except the following (check the REST API documentation for the full list of supported parameters):
job_cluster_key This property is required. str
Identifier that can be referenced in a task block, so that the cluster is shared between tasks
new_cluster This property is required. JobJobClusterNewCluster
Block with almost the same set of parameters as the databricks.Cluster resource, except the following (check the REST API documentation for the full list of supported parameters):
jobClusterKey This property is required. String
Identifier that can be referenced in a task block, so that the cluster is shared between tasks
newCluster This property is required. Property Map
Block with almost the same set of parameters as the databricks.Cluster resource, except the following (check the REST API documentation for the full list of supported parameters):

JobJobClusterNewCluster
, JobJobClusterNewClusterArgs

SparkVersion This property is required. string
ApplyPolicyDefaultValues bool
Autoscale JobJobClusterNewClusterAutoscale
AwsAttributes JobJobClusterNewClusterAwsAttributes
AzureAttributes JobJobClusterNewClusterAzureAttributes
ClusterId string
ClusterLogConf JobJobClusterNewClusterClusterLogConf
ClusterMountInfos List<JobJobClusterNewClusterClusterMountInfo>
ClusterName string
CustomTags Dictionary<string, string>
DataSecurityMode string
DockerImage JobJobClusterNewClusterDockerImage
DriverInstancePoolId string
DriverNodeTypeId string
EnableElasticDisk bool
EnableLocalDiskEncryption bool
GcpAttributes JobJobClusterNewClusterGcpAttributes
IdempotencyToken Changes to this property will trigger replacement. string
InitScripts List<JobJobClusterNewClusterInitScript>
InstancePoolId string
IsSingleNode bool
Kind string
Libraries List<JobJobClusterNewClusterLibrary>
(List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
NodeTypeId string
NumWorkers int
PolicyId string
RuntimeEngine string
SingleUserName string
SparkConf Dictionary<string, string>
SparkEnvVars Dictionary<string, string>
SshPublicKeys List<string>
UseMlRuntime bool
WorkloadType JobJobClusterNewClusterWorkloadType
isn't supported
SparkVersion This property is required. string
ApplyPolicyDefaultValues bool
Autoscale JobJobClusterNewClusterAutoscale
AwsAttributes JobJobClusterNewClusterAwsAttributes
AzureAttributes JobJobClusterNewClusterAzureAttributes
ClusterId string
ClusterLogConf JobJobClusterNewClusterClusterLogConf
ClusterMountInfos []JobJobClusterNewClusterClusterMountInfo
ClusterName string
CustomTags map[string]string
DataSecurityMode string
DockerImage JobJobClusterNewClusterDockerImage
DriverInstancePoolId string
DriverNodeTypeId string
EnableElasticDisk bool
EnableLocalDiskEncryption bool
GcpAttributes JobJobClusterNewClusterGcpAttributes
IdempotencyToken Changes to this property will trigger replacement. string
InitScripts []JobJobClusterNewClusterInitScript
InstancePoolId string
IsSingleNode bool
Kind string
Libraries []JobJobClusterNewClusterLibrary
(List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
NodeTypeId string
NumWorkers int
PolicyId string
RuntimeEngine string
SingleUserName string
SparkConf map[string]string
SparkEnvVars map[string]string
SshPublicKeys []string
UseMlRuntime bool
WorkloadType JobJobClusterNewClusterWorkloadType
isn't supported
sparkVersion This property is required. String
applyPolicyDefaultValues Boolean
autoscale JobJobClusterNewClusterAutoscale
awsAttributes JobJobClusterNewClusterAwsAttributes
azureAttributes JobJobClusterNewClusterAzureAttributes
clusterId String
clusterLogConf JobJobClusterNewClusterClusterLogConf
clusterMountInfos List<JobJobClusterNewClusterClusterMountInfo>
clusterName String
customTags Map<String,String>
dataSecurityMode String
dockerImage JobJobClusterNewClusterDockerImage
driverInstancePoolId String
driverNodeTypeId String
enableElasticDisk Boolean
enableLocalDiskEncryption Boolean
gcpAttributes JobJobClusterNewClusterGcpAttributes
idempotencyToken Changes to this property will trigger replacement. String
initScripts List<JobJobClusterNewClusterInitScript>
instancePoolId String
isSingleNode Boolean
kind String
libraries List<JobJobClusterNewClusterLibrary>
(List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
nodeTypeId String
numWorkers Integer
policyId String
runtimeEngine String
singleUserName String
sparkConf Map<String,String>
sparkEnvVars Map<String,String>
sshPublicKeys List<String>
useMlRuntime Boolean
workloadType JobJobClusterNewClusterWorkloadType
isn't supported
spark_version This property is required. str
apply_policy_default_values bool
autoscale JobJobClusterNewClusterAutoscale
aws_attributes JobJobClusterNewClusterAwsAttributes
azure_attributes JobJobClusterNewClusterAzureAttributes
cluster_id str
cluster_log_conf JobJobClusterNewClusterClusterLogConf
cluster_mount_infos Sequence[JobJobClusterNewClusterClusterMountInfo]
cluster_name str
custom_tags Mapping[str, str]
data_security_mode str
docker_image JobJobClusterNewClusterDockerImage
driver_instance_pool_id str
driver_node_type_id str
enable_elastic_disk bool
enable_local_disk_encryption bool
gcp_attributes JobJobClusterNewClusterGcpAttributes
idempotency_token Changes to this property will trigger replacement. str
init_scripts Sequence[JobJobClusterNewClusterInitScript]
instance_pool_id str
is_single_node bool
kind str
libraries Sequence[JobJobClusterNewClusterLibrary]
(List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
node_type_id str
num_workers int
policy_id str
runtime_engine str
single_user_name str
spark_conf Mapping[str, str]
spark_env_vars Mapping[str, str]
ssh_public_keys Sequence[str]
use_ml_runtime bool
workload_type JobJobClusterNewClusterWorkloadType
isn't supported
sparkVersion This property is required. String
applyPolicyDefaultValues Boolean
autoscale Property Map
awsAttributes Property Map
azureAttributes Property Map
clusterId String
clusterLogConf Property Map
clusterMountInfos List<Property Map>
clusterName String
customTags Map<String>
dataSecurityMode String
dockerImage Property Map
driverInstancePoolId String
driverNodeTypeId String
enableElasticDisk Boolean
enableLocalDiskEncryption Boolean
gcpAttributes Property Map
idempotencyToken Changes to this property will trigger replacement. String
initScripts List<Property Map>
instancePoolId String
isSingleNode Boolean
kind String
libraries List<Property Map>
(List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
nodeTypeId String
numWorkers Number
policyId String
runtimeEngine String
singleUserName String
sparkConf Map<String>
sparkEnvVars Map<String>
sshPublicKeys List<String>
useMlRuntime Boolean
workloadType Property Map
isn't supported

JobJobClusterNewClusterAutoscale
, JobJobClusterNewClusterAutoscaleArgs

maxWorkers Integer
minWorkers Integer
maxWorkers number
minWorkers number
maxWorkers Number
minWorkers Number
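A minimal sketch of an autoscaling shared job cluster; the Spark version and node type are looked up with the provider's data sources, and the task list is omitted:

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

const latest = databricks.getSparkVersionOutput({ longTermSupport: true });
const smallest = databricks.getNodeTypeOutput({ localDisk: true });

const autoscaling = new databricks.Job("autoscaling", {
    name: "Job with an autoscaling job cluster",
    jobClusters: [{
        jobClusterKey: "autoscaling",
        newCluster: {
            sparkVersion: latest.id,
            nodeTypeId: smallest.id,
            autoscale: {
                minWorkers: 1,
                maxWorkers: 8,
            },
        },
    }],
    // tasks referencing jobClusterKey "autoscaling" omitted for brevity
});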

JobJobClusterNewClusterAwsAttributes
, JobJobClusterNewClusterAwsAttributesArgs

JobJobClusterNewClusterAzureAttributes
, JobJobClusterNewClusterAzureAttributesArgs

JobJobClusterNewClusterAzureAttributesLogAnalyticsInfo
, JobJobClusterNewClusterAzureAttributesLogAnalyticsInfoArgs

JobJobClusterNewClusterClusterLogConf
, JobJobClusterNewClusterClusterLogConfArgs

JobJobClusterNewClusterClusterLogConfDbfs
, JobJobClusterNewClusterClusterLogConfDbfsArgs

Destination This property is required. string
Destination This property is required. string
destination This property is required. String
destination This property is required. string
destination This property is required. str
destination This property is required. String

JobJobClusterNewClusterClusterLogConfS3
, JobJobClusterNewClusterClusterLogConfS3Args

Destination This property is required. string
CannedAcl string
EnableEncryption bool
EncryptionType string
Endpoint string
KmsKey string
Region string
Destination This property is required. string
CannedAcl string
EnableEncryption bool
EncryptionType string
Endpoint string
KmsKey string
Region string
destination This property is required. String
cannedAcl String
enableEncryption Boolean
encryptionType String
endpoint String
kmsKey String
region String
destination This property is required. string
cannedAcl string
enableEncryption boolean
encryptionType string
endpoint string
kmsKey string
region string
destination This property is required. str
canned_acl str
enable_encryption bool
encryption_type str
endpoint str
kms_key str
region str
destination This property is required. String
cannedAcl String
enableEncryption Boolean
encryptionType String
endpoint String
kmsKey String
region String
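A minimal sketch of delivering job-cluster logs to S3 (the bucket and region are hypothetical):

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

const latest = databricks.getSparkVersionOutput({ longTermSupport: true });
const smallest = databricks.getNodeTypeOutput({ localDisk: true });

const logged = new databricks.Job("logged", {
    name: "Job with cluster log delivery",
    jobClusters: [{
        jobClusterKey: "logged",
        newCluster: {
            sparkVersion: latest.id,
            nodeTypeId: smallest.id,
            numWorkers: 2,
            clusterLogConf: {
                s3: {
                    destination: "s3://example-bucket/cluster-logs", // hypothetical bucket
                    region: "us-east-1",
                    enableEncryption: true,
                },
            },
        },
    }],
    // tasks omitted for brevity
});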

JobJobClusterNewClusterClusterLogConfVolumes
, JobJobClusterNewClusterClusterLogConfVolumesArgs

Destination This property is required. string
Destination This property is required. string
destination This property is required. String
destination This property is required. string
destination This property is required. str
destination This property is required. String

JobJobClusterNewClusterClusterMountInfo
, JobJobClusterNewClusterClusterMountInfoArgs

localMountDirPath This property is required. String
networkFilesystemInfo This property is required. Property Map
remoteMountDirPath String

JobJobClusterNewClusterClusterMountInfoNetworkFilesystemInfo
, JobJobClusterNewClusterClusterMountInfoNetworkFilesystemInfoArgs

ServerAddress This property is required. string
MountOptions string
ServerAddress This property is required. string
MountOptions string
serverAddress This property is required. String
mountOptions String
serverAddress This property is required. string
mountOptions string
server_address This property is required. str
mount_options str
serverAddress This property is required. String
mountOptions String

JobJobClusterNewClusterDockerImage
, JobJobClusterNewClusterDockerImageArgs

Url This property is required. string
URL of the Docker image
BasicAuth JobJobClusterNewClusterDockerImageBasicAuth
Url This property is required. string
URL of the Docker image
BasicAuth JobJobClusterNewClusterDockerImageBasicAuth
url This property is required. String
URL of the Docker image
basicAuth JobJobClusterNewClusterDockerImageBasicAuth
url This property is required. string
URL of the Docker image
basicAuth JobJobClusterNewClusterDockerImageBasicAuth
url This property is required. str
URL of the Docker image
basic_auth JobJobClusterNewClusterDockerImageBasicAuth
url This property is required. String
URL of the Docker image
basicAuth Property Map

JobJobClusterNewClusterDockerImageBasicAuth
, JobJobClusterNewClusterDockerImageBasicAuthArgs

Password This property is required. string
Username This property is required. string
Password This property is required. string
Username This property is required. string
password This property is required. String
username This property is required. String
password This property is required. string
username This property is required. string
password This property is required. str
username This property is required. str
password This property is required. String
username This property is required. String

JobJobClusterNewClusterGcpAttributes
, JobJobClusterNewClusterGcpAttributesArgs

JobJobClusterNewClusterInitScript
, JobJobClusterNewClusterInitScriptArgs

abfss Property Map
dbfs Property Map

Deprecated: For init scripts use 'volumes', 'workspace' or cloud storage location instead of 'dbfs'.

file Property Map
block consisting of single string fields:
gcs Property Map
s3 Property Map
volumes Property Map
workspace Property Map
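A minimal sketch of attaching init scripts from a Unity Catalog volume and the workspace (both paths are hypothetical):

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

const latest = databricks.getSparkVersionOutput({ longTermSupport: true });
const smallest = databricks.getNodeTypeOutput({ localDisk: true });

const withInitScripts = new databricks.Job("with-init-scripts", {
    name: "Job cluster with init scripts",
    jobClusters: [{
        jobClusterKey: "init",
        newCluster: {
            sparkVersion: latest.id,
            nodeTypeId: smallest.id,
            numWorkers: 1,
            initScripts: [
                { volumes: { destination: "/Volumes/main/default/scripts/install.sh" } }, // hypothetical volume path
                { workspace: { destination: "/Shared/init/configure.sh" } },              // hypothetical workspace path
            ],
        },
    }],
    // tasks omitted for brevity
});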

JobJobClusterNewClusterInitScriptAbfss
, JobJobClusterNewClusterInitScriptAbfssArgs

Destination This property is required. string
Destination This property is required. string
destination This property is required. String
destination This property is required. string
destination This property is required. str
destination This property is required. String

JobJobClusterNewClusterInitScriptDbfs
, JobJobClusterNewClusterInitScriptDbfsArgs

Destination This property is required. string
Destination This property is required. string
destination This property is required. String
destination This property is required. string
destination This property is required. str
destination This property is required. String

JobJobClusterNewClusterInitScriptFile
, JobJobClusterNewClusterInitScriptFileArgs

Destination This property is required. string
Destination This property is required. string
destination This property is required. String
destination This property is required. string
destination This property is required. str
destination This property is required. String

JobJobClusterNewClusterInitScriptGcs
, JobJobClusterNewClusterInitScriptGcsArgs

Destination This property is required. string
Destination This property is required. string
destination This property is required. String
destination This property is required. string
destination This property is required. str
destination This property is required. String

JobJobClusterNewClusterInitScriptS3
, JobJobClusterNewClusterInitScriptS3Args

Destination This property is required. string
CannedAcl string
EnableEncryption bool
EncryptionType string
Endpoint string
KmsKey string
Region string
Destination This property is required. string
CannedAcl string
EnableEncryption bool
EncryptionType string
Endpoint string
KmsKey string
Region string
destination This property is required. String
cannedAcl String
enableEncryption Boolean
encryptionType String
endpoint String
kmsKey String
region String
destination This property is required. string
cannedAcl string
enableEncryption boolean
encryptionType string
endpoint string
kmsKey string
region string
destination This property is required. str
canned_acl str
enable_encryption bool
encryption_type str
endpoint str
kms_key str
region str
destination This property is required. String
cannedAcl String
enableEncryption Boolean
encryptionType String
endpoint String
kmsKey String
region String

JobJobClusterNewClusterInitScriptVolumes
, JobJobClusterNewClusterInitScriptVolumesArgs

Destination This property is required. string
Destination This property is required. string
destination This property is required. String
destination This property is required. string
destination This property is required. str
destination This property is required. String

JobJobClusterNewClusterInitScriptWorkspace
, JobJobClusterNewClusterInitScriptWorkspaceArgs

Destination This property is required. string
Destination This property is required. string
destination This property is required. String
destination This property is required. string
destination This property is required. str
destination This property is required. String

JobJobClusterNewClusterLibrary
, JobJobClusterNewClusterLibraryArgs

JobJobClusterNewClusterLibraryCran
, JobJobClusterNewClusterLibraryCranArgs

Package This property is required. string
Repo string
Package This property is required. string
Repo string
package_ This property is required. String
repo String
package This property is required. string
repo string
package This property is required. str
repo str
package This property is required. String
repo String

JobJobClusterNewClusterLibraryMaven
, JobJobClusterNewClusterLibraryMavenArgs

Coordinates This property is required. string
Exclusions List<string>
Repo string
Coordinates This property is required. string
Exclusions []string
Repo string
coordinates This property is required. String
exclusions List<String>
repo String
coordinates This property is required. string
exclusions string[]
repo string
coordinates This property is required. str
exclusions Sequence[str]
repo str
coordinates This property is required. String
exclusions List<String>
repo String

JobJobClusterNewClusterLibraryPypi
, JobJobClusterNewClusterLibraryPypiArgs

Package This property is required. string
Repo string
Package This property is required. string
Repo string
package_ This property is required. String
repo String
package This property is required. string
repo string
package This property is required. str
repo str
package This property is required. String
repo String

JobJobClusterNewClusterWorkloadType
, JobJobClusterNewClusterWorkloadTypeArgs

clients This property is required. Property Map

JobJobClusterNewClusterWorkloadTypeClients
, JobJobClusterNewClusterWorkloadTypeClientsArgs

Jobs bool
Notebooks bool
Jobs bool
Notebooks bool
jobs Boolean
notebooks Boolean
jobs boolean
notebooks boolean
jobs bool
notebooks bool
jobs Boolean
notebooks Boolean

JobLibrary
, JobLibraryArgs

JobLibraryCran
, JobLibraryCranArgs

Package This property is required. string
Repo string
Package This property is required. string
Repo string
package_ This property is required. String
repo String
package This property is required. string
repo string
package This property is required. str
repo str
package This property is required. String
repo String

JobLibraryMaven
, JobLibraryMavenArgs

Coordinates This property is required. string
Exclusions List<string>
Repo string
Coordinates This property is required. string
Exclusions []string
Repo string
coordinates This property is required. String
exclusions List<String>
repo String
coordinates This property is required. string
exclusions string[]
repo string
coordinates This property is required. str
exclusions Sequence[str]
repo str
coordinates This property is required. String
exclusions List<String>
repo String

JobLibraryPypi
, JobLibraryPypiArgs

Package This property is required. string
Repo string
Package This property is required. string
Repo string
package_ This property is required. String
repo String
package This property is required. string
repo string
package This property is required. str
repo str
package This property is required. String
repo String
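A minimal sketch of job-level libraries (the package version and Maven coordinates are hypothetical); the same library blocks can also be declared per task:

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

const withLibraries = new databricks.Job("with-libraries", {
    name: "Job with libraries",
    libraries: [
        { pypi: { package: "requests==2.32.3" } },              // hypothetical pinned version
        { maven: { coordinates: "com.example:utils:1.0.0" } },  // hypothetical coordinates
    ],
    // tasks and cluster configuration omitted for brevity
});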

JobNewCluster
, JobNewClusterArgs

SparkVersion This property is required. string
ApplyPolicyDefaultValues bool
Autoscale JobNewClusterAutoscale
AwsAttributes JobNewClusterAwsAttributes
AzureAttributes JobNewClusterAzureAttributes
ClusterId string
ClusterLogConf JobNewClusterClusterLogConf
ClusterMountInfos List<JobNewClusterClusterMountInfo>
ClusterName string
CustomTags Dictionary<string, string>
DataSecurityMode string
DockerImage JobNewClusterDockerImage
DriverInstancePoolId string
DriverNodeTypeId string
EnableElasticDisk bool
EnableLocalDiskEncryption bool
GcpAttributes JobNewClusterGcpAttributes
IdempotencyToken Changes to this property will trigger replacement. string
InitScripts List<JobNewClusterInitScript>
InstancePoolId string
IsSingleNode bool
Kind string
Libraries List<JobNewClusterLibrary>
(List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
NodeTypeId string
NumWorkers int
PolicyId string
RuntimeEngine string
SingleUserName string
SparkConf Dictionary<string, string>
SparkEnvVars Dictionary<string, string>
SshPublicKeys List<string>
UseMlRuntime bool
WorkloadType JobNewClusterWorkloadType
isn't supported
SparkVersion This property is required. string
ApplyPolicyDefaultValues bool
Autoscale JobNewClusterAutoscale
AwsAttributes JobNewClusterAwsAttributes
AzureAttributes JobNewClusterAzureAttributes
ClusterId string
ClusterLogConf JobNewClusterClusterLogConf
ClusterMountInfos []JobNewClusterClusterMountInfo
ClusterName string
CustomTags map[string]string
DataSecurityMode string
DockerImage JobNewClusterDockerImage
DriverInstancePoolId string
DriverNodeTypeId string
EnableElasticDisk bool
EnableLocalDiskEncryption bool
GcpAttributes JobNewClusterGcpAttributes
IdempotencyToken Changes to this property will trigger replacement. string
InitScripts []JobNewClusterInitScript
InstancePoolId string
IsSingleNode bool
Kind string
Libraries []JobNewClusterLibrary
(List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
NodeTypeId string
NumWorkers int
PolicyId string
RuntimeEngine string
SingleUserName string
SparkConf map[string]string
SparkEnvVars map[string]string
SshPublicKeys []string
UseMlRuntime bool
WorkloadType JobNewClusterWorkloadType
isn't supported
sparkVersion This property is required. String
applyPolicyDefaultValues Boolean
autoscale JobNewClusterAutoscale
awsAttributes JobNewClusterAwsAttributes
azureAttributes JobNewClusterAzureAttributes
clusterId String
clusterLogConf JobNewClusterClusterLogConf
clusterMountInfos List<JobNewClusterClusterMountInfo>
clusterName String
customTags Map<String,String>
dataSecurityMode String
dockerImage JobNewClusterDockerImage
driverInstancePoolId String
driverNodeTypeId String
enableElasticDisk Boolean
enableLocalDiskEncryption Boolean
gcpAttributes JobNewClusterGcpAttributes
idempotencyToken Changes to this property will trigger replacement. String
initScripts List<JobNewClusterInitScript>
instancePoolId String
isSingleNode Boolean
kind String
libraries List<JobNewClusterLibrary>
(List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
nodeTypeId String
numWorkers Integer
policyId String
runtimeEngine String
singleUserName String
sparkConf Map<String,String>
sparkEnvVars Map<String,String>
sshPublicKeys List<String>
useMlRuntime Boolean
workloadType JobNewClusterWorkloadType
isn't supported
spark_version This property is required. str
apply_policy_default_values bool
autoscale JobNewClusterAutoscale
aws_attributes JobNewClusterAwsAttributes
azure_attributes JobNewClusterAzureAttributes
cluster_id str
cluster_log_conf JobNewClusterClusterLogConf
cluster_mount_infos Sequence[JobNewClusterClusterMountInfo]
cluster_name str
custom_tags Mapping[str, str]
data_security_mode str
docker_image JobNewClusterDockerImage
driver_instance_pool_id str
driver_node_type_id str
enable_elastic_disk bool
enable_local_disk_encryption bool
gcp_attributes JobNewClusterGcpAttributes
idempotency_token Changes to this property will trigger replacement. str
init_scripts Sequence[JobNewClusterInitScript]
instance_pool_id str
is_single_node bool
kind str
libraries Sequence[JobNewClusterLibrary]
(List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
node_type_id str
num_workers int
policy_id str
runtime_engine str
single_user_name str
spark_conf Mapping[str, str]
spark_env_vars Mapping[str, str]
ssh_public_keys Sequence[str]
use_ml_runtime bool
workload_type JobNewClusterWorkloadType
isn't supported
sparkVersion This property is required. String
applyPolicyDefaultValues Boolean
autoscale Property Map
awsAttributes Property Map
azureAttributes Property Map
clusterId String
clusterLogConf Property Map
clusterMountInfos List<Property Map>
clusterName String
customTags Map<String>
dataSecurityMode String
dockerImage Property Map
driverInstancePoolId String
driverNodeTypeId String
enableElasticDisk Boolean
enableLocalDiskEncryption Boolean
gcpAttributes Property Map
idempotencyToken Changes to this property will trigger replacement. String
initScripts List<Property Map>
instancePoolId String
isSingleNode Boolean
kind String
libraries List<Property Map>
(List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
nodeTypeId String
numWorkers Number
policyId String
runtimeEngine String
singleUserName String
sparkConf Map<String>
sparkEnvVars Map<String>
sshPublicKeys List<String>
useMlRuntime Boolean
workloadType Property Map
isn't supported
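
As a rough TypeScript sketch of a per-task cluster with autoscaling (the Spark version, node type, tags, and notebook path below are placeholders, not values from this reference; in a real program you would typically resolve the runtime and node type with the databricks.getSparkVersion and databricks.getNodeType data sources):

import * as databricks from "@pulumi/databricks";

// Placeholder sparkVersion/nodeTypeId values for illustration only.
const clusterExample = new databricks.Job("new-cluster-example", {
    name: "Job with a per-task cluster",
    tasks: [{
        taskKey: "train",
        newCluster: {
            sparkVersion: "15.4.x-scala2.12",              // assumed runtime label
            nodeTypeId: "i3.xlarge",                       // assumed node type
            autoscale: { minWorkers: 1, maxWorkers: 4 },   // scale between 1 and 4 workers
            sparkConf: { "spark.speculation": "true" },
            customTags: { team: "data-eng" },              // placeholder tag
        },
        notebookTask: { notebookPath: "/Shared/train" },   // placeholder workspace path
    }],
});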

JobNewClusterAutoscale
, JobNewClusterAutoscaleArgs

maxWorkers Integer
minWorkers Integer
maxWorkers number
minWorkers number
maxWorkers Number
minWorkers Number

JobNewClusterAwsAttributes
, JobNewClusterAwsAttributesArgs

JobNewClusterAzureAttributes
, JobNewClusterAzureAttributesArgs

JobNewClusterAzureAttributesLogAnalyticsInfo
, JobNewClusterAzureAttributesLogAnalyticsInfoArgs

JobNewClusterClusterLogConf
, JobNewClusterClusterLogConfArgs

JobNewClusterClusterLogConfDbfs
, JobNewClusterClusterLogConfDbfsArgs

Destination This property is required. string
Destination This property is required. string
destination This property is required. String
destination This property is required. string
destination This property is required. str
destination This property is required. String

JobNewClusterClusterLogConfS3
, JobNewClusterClusterLogConfS3Args

Destination This property is required. string
CannedAcl string
EnableEncryption bool
EncryptionType string
Endpoint string
KmsKey string
Region string
Destination This property is required. string
CannedAcl string
EnableEncryption bool
EncryptionType string
Endpoint string
KmsKey string
Region string
destination This property is required. String
cannedAcl String
enableEncryption Boolean
encryptionType String
endpoint String
kmsKey String
region String
destination This property is required. string
cannedAcl string
enableEncryption boolean
encryptionType string
endpoint string
kmsKey string
region string
destination This property is required. str
canned_acl str
enable_encryption bool
encryption_type str
endpoint str
kms_key str
region str
destination This property is required. String
cannedAcl String
enableEncryption Boolean
encryptionType String
endpoint String
kmsKey String
region String
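
A rough TypeScript sketch of shipping cluster logs to S3 via clusterLogConf (the bucket, region, Spark version, node type, and notebook path are placeholders, not values from this reference):

import * as databricks from "@pulumi/databricks";

// Driver and executor logs are delivered to the given S3 destination.
const logConfExample = new databricks.Job("log-conf-example", {
    name: "Job with S3 cluster logs",
    tasks: [{
        taskKey: "main",
        newCluster: {
            sparkVersion: "15.4.x-scala2.12",  // assumed runtime label
            nodeTypeId: "i3.xlarge",           // assumed node type
            numWorkers: 1,
            clusterLogConf: {
                s3: {
                    destination: "s3://my-log-bucket/job-logs",  // placeholder bucket/prefix
                    region: "us-east-1",
                    enableEncryption: true,
                    cannedAcl: "bucket-owner-full-control",
                },
            },
        },
        notebookTask: { notebookPath: "/Shared/main" },  // placeholder workspace path
    }],
});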

JobNewClusterClusterLogConfVolumes
, JobNewClusterClusterLogConfVolumesArgs

Destination This property is required. string
Destination This property is required. string
destination This property is required. String
destination This property is required. string
destination This property is required. str
destination This property is required. String

JobNewClusterClusterMountInfo
, JobNewClusterClusterMountInfoArgs

localMountDirPath This property is required. String
networkFilesystemInfo This property is required. Property Map
remoteMountDirPath String

JobNewClusterClusterMountInfoNetworkFilesystemInfo
, JobNewClusterClusterMountInfoNetworkFilesystemInfoArgs

ServerAddress This property is required. string
MountOptions string
ServerAddress This property is required. string
MountOptions string
serverAddress This property is required. String
mountOptions String
serverAddress This property is required. string
mountOptions string
server_address This property is required. str
mount_options str
serverAddress This property is required. String
mountOptions String

JobNewClusterDockerImage
, JobNewClusterDockerImageArgs

Url This property is required. string
URL of the Docker image.
BasicAuth JobNewClusterDockerImageBasicAuth
Url This property is required. string
URL of the Docker image.
BasicAuth JobNewClusterDockerImageBasicAuth
url This property is required. String
URL of the Docker image.
basicAuth JobNewClusterDockerImageBasicAuth
url This property is required. string
URL of the Docker image.
basicAuth JobNewClusterDockerImageBasicAuth
url This property is required. str
URL of the Docker image.
basic_auth JobNewClusterDockerImageBasicAuth
url This property is required. String
URL of the Docker image.
basicAuth Property Map

JobNewClusterDockerImageBasicAuth
, JobNewClusterDockerImageBasicAuthArgs

Password This property is required. string
Username This property is required. string
Password This property is required. string
Username This property is required. string
password This property is required. String
username This property is required. String
password This property is required. string
username This property is required. string
password This property is required. str
username This property is required. str
password This property is required. String
username This property is required. String
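
A rough TypeScript sketch of a custom container image with registry credentials (the image URL is a placeholder, and the credentials are assumed to come from Pulumi config keys named registryUser/registryPassword so the password isn't hard-coded; none of these values come from this reference):

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

const config = new pulumi.Config();

// Placeholder image URL; credentials read from (assumed) config keys.
const dockerExample = new databricks.Job("docker-example", {
    name: "Job on a custom container",
    tasks: [{
        taskKey: "main",
        newCluster: {
            sparkVersion: "15.4.x-scala2.12",  // assumed runtime label
            nodeTypeId: "i3.xlarge",           // assumed node type
            numWorkers: 1,
            dockerImage: {
                url: "myregistry.example.com/jobs/runtime:latest",  // placeholder image
                basicAuth: {
                    username: config.require("registryUser"),
                    password: config.requireSecret("registryPassword"),
                },
            },
        },
        notebookTask: { notebookPath: "/Shared/main" },  // placeholder workspace path
    }],
});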

JobNewClusterGcpAttributes
, JobNewClusterGcpAttributesArgs

JobNewClusterInitScript
, JobNewClusterInitScriptArgs

abfss Property Map
dbfs Property Map

Deprecated: For init scripts use 'volumes', 'workspace' or cloud storage location instead of 'dbfs'.

file Property Map
block consisting of a single string field: destination.
gcs Property Map
s3 Property Map
volumes Property Map
workspace Property Map
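
A rough TypeScript sketch of attaching init scripts to a job cluster, using the workspace and volumes destination blocks documented below (the script paths, Spark version, node type, and notebook path are placeholders, not values from this reference):

import * as databricks from "@pulumi/databricks";

// Two init scripts: one stored as a workspace file, one in a Unity Catalog volume.
const initScriptExample = new databricks.Job("init-script-example", {
    name: "Job with init scripts",
    tasks: [{
        taskKey: "main",
        newCluster: {
            sparkVersion: "15.4.x-scala2.12",  // assumed runtime label
            nodeTypeId: "i3.xlarge",           // assumed node type
            numWorkers: 1,
            initScripts: [
                { workspace: { destination: "/Shared/init/setup.sh" } },                    // placeholder path
                { volumes: { destination: "/Volumes/main/default/scripts/extra.sh" } },     // placeholder path
            ],
        },
        notebookTask: { notebookPath: "/Shared/main" },  // placeholder workspace path
    }],
});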

JobNewClusterInitScriptAbfss
, JobNewClusterInitScriptAbfssArgs

Destination This property is required. string
Destination This property is required. string
destination This property is required. String
destination This property is required. string
destination This property is required. str
destination This property is required. String

JobNewClusterInitScriptDbfs
, JobNewClusterInitScriptDbfsArgs

Destination This property is required. string
Destination This property is required. string
destination This property is required. String
destination This property is required. string
destination This property is required. str
destination This property is required. String

JobNewClusterInitScriptFile
, JobNewClusterInitScriptFileArgs

Destination This property is required. string
Destination This property is required. string
destination This property is required. String
destination This property is required. string
destination This property is required. str
destination This property is required. String

JobNewClusterInitScriptGcs
, JobNewClusterInitScriptGcsArgs

Destination This property is required. string
Destination This property is required. string
destination This property is required. String
destination This property is required. string
destination This property is required. str
destination This property is required. String

JobNewClusterInitScriptS3
, JobNewClusterInitScriptS3Args

Destination This property is required. string
CannedAcl string
EnableEncryption bool
EncryptionType string
Endpoint string
KmsKey string
Region string
Destination This property is required. string
CannedAcl string
EnableEncryption bool
EncryptionType string
Endpoint string
KmsKey string
Region string
destination This property is required. String
cannedAcl String
enableEncryption Boolean
encryptionType String
endpoint String
kmsKey String
region String
destination This property is required. string
cannedAcl string
enableEncryption boolean
encryptionType string
endpoint string
kmsKey string
region string
destination This property is required. str
canned_acl str
enable_encryption bool
encryption_type str
endpoint str
kms_key str
region str
destination This property is required. String
cannedAcl String
enableEncryption Boolean
encryptionType String
endpoint String
kmsKey String
region String

JobNewClusterInitScriptVolumes
, JobNewClusterInitScriptVolumesArgs

Destination This property is required. string
Destination This property is required. string
destination This property is required. String
destination This property is required. string
destination This property is required. str
destination This property is required. String

JobNewClusterInitScriptWorkspace
, JobNewClusterInitScriptWorkspaceArgs

Destination This property is required. string
Destination This property is required. string
destination This property is required. String
destination This property is required. string
destination This property is required. str
destination This property is required. String

JobNewClusterLibrary
, JobNewClusterLibraryArgs

JobNewClusterLibraryCran
, JobNewClusterLibraryCranArgs

Package This property is required. string
Repo string
Package This property is required. string
Repo string
package_ This property is required. String
repo String
package This property is required. string
repo string
package This property is required. str
repo str
package This property is required. String
repo String

JobNewClusterLibraryMaven
, JobNewClusterLibraryMavenArgs

Coordinates This property is required. string
Exclusions List<string>
Repo string
Coordinates This property is required. string
Exclusions []string
Repo string
coordinates This property is required. String
exclusions List<String>
repo String
coordinates This property is required. string
exclusions string[]
repo string
coordinates This property is required. str
exclusions Sequence[str]
repo str
coordinates This property is required. String
exclusions List<String>
repo String

JobNewClusterLibraryPypi
, JobNewClusterLibraryPypiArgs

Package This property is required. string
Repo string
Package This property is required. string
Repo string
package_ This property is required. String
repo String
package This property is required. string
repo string
package This property is required. str
repo str
package This property is required. String
repo String

JobNewClusterWorkloadType
, JobNewClusterWorkloadTypeArgs

Clients This property is required. JobNewClusterWorkloadTypeClients
Clients This property is required. JobNewClusterWorkloadTypeClients
clients This property is required. JobNewClusterWorkloadTypeClients
clients This property is required. JobNewClusterWorkloadTypeClients
clients This property is required. JobNewClusterWorkloadTypeClients
clients This property is required. Property Map

JobNewClusterWorkloadTypeClients
, JobNewClusterWorkloadTypeClientsArgs

Jobs bool
Notebooks bool
Jobs bool
Notebooks bool
jobs Boolean
notebooks Boolean
jobs boolean
notebooks boolean
jobs bool
notebooks bool
jobs Boolean
notebooks Boolean

JobNotebookTask
, JobNotebookTaskArgs

NotebookPath This property is required. string
The path of the databricks.Notebook to be run in the Databricks workspace or remote repository. For notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash. For notebooks stored in a remote repository, the path must be relative. This field is required.
BaseParameters Dictionary<string, string>
(Map) Base parameters to be used for each run of this job. If the run is initiated by a call to run-now with parameters specified, the two parameters maps will be merged. If the same key is specified in base_parameters and in run-now, the value from run-now will be used. If the notebook takes a parameter that is not specified in the job’s base_parameters or the run-now override parameters, the default value from the notebook will be used. Retrieve these parameters in a notebook using dbutils.widgets.get.
Source string
Location type of the notebook, can only be WORKSPACE or GIT. When set to WORKSPACE, the notebook will be retrieved from the local Databricks workspace. When set to GIT, the notebook will be retrieved from a Git repository defined in git_source. If the value is empty, the task will use GIT if git_source is defined and WORKSPACE otherwise.
WarehouseId string
ID of the SQL warehouse (databricks_sql_endpoint) that will be used to execute the task with a SQL notebook.
NotebookPath This property is required. string
The path of the databricks.Notebook to be run in the Databricks workspace or remote repository. For notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash. For notebooks stored in a remote repository, the path must be relative. This field is required.
BaseParameters map[string]string
(Map) Base parameters to be used for each run of this job. If the run is initiated by a call to run-now with parameters specified, the two parameters maps will be merged. If the same key is specified in base_parameters and in run-now, the value from run-now will be used. If the notebook takes a parameter that is not specified in the job’s base_parameters or the run-now override parameters, the default value from the notebook will be used. Retrieve these parameters in a notebook using dbutils.widgets.get.
Source string
Location type of the notebook, can only be WORKSPACE or GIT. When set to WORKSPACE, the notebook will be retrieved from the local Databricks workspace. When set to GIT, the notebook will be retrieved from a Git repository defined in git_source. If the value is empty, the task will use GIT if git_source is defined and WORKSPACE otherwise.
WarehouseId string
ID of the SQL warehouse (databricks_sql_endpoint) that will be used to execute the task with a SQL notebook.
notebookPath This property is required. String
The path of the databricks.Notebook to be run in the Databricks workspace or remote repository. For notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash. For notebooks stored in a remote repository, the path must be relative. This field is required.
baseParameters Map<String,String>
(Map) Base parameters to be used for each run of this job. If the run is initiated by a call to run-now with parameters specified, the two parameters maps will be merged. If the same key is specified in base_parameters and in run-now, the value from run-now will be used. If the notebook takes a parameter that is not specified in the job’s base_parameters or the run-now override parameters, the default value from the notebook will be used. Retrieve these parameters in a notebook using dbutils.widgets.get.
source String
Location type of the notebook, can only be WORKSPACE or GIT. When set to WORKSPACE, the notebook will be retrieved from the local Databricks workspace. When set to GIT, the notebook will be retrieved from a Git repository defined in git_source. If the value is empty, the task will use GIT if git_source is defined and WORKSPACE otherwise.
warehouseId String
ID of the SQL warehouse (databricks_sql_endpoint) that will be used to execute the task with a SQL notebook.
notebookPath This property is required. string
The path of the databricks.Notebook to be run in the Databricks workspace or remote repository. For notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash. For notebooks stored in a remote repository, the path must be relative. This field is required.
baseParameters {[key: string]: string}
(Map) Base parameters to be used for each run of this job. If the run is initiated by a call to run-now with parameters specified, the two parameters maps will be merged. If the same key is specified in base_parameters and in run-now, the value from run-now will be used. If the notebook takes a parameter that is not specified in the job’s base_parameters or the run-now override parameters, the default value from the notebook will be used. Retrieve these parameters in a notebook using dbutils.widgets.get.
source string
Location type of the notebook, can only be WORKSPACE or GIT. When set to WORKSPACE, the notebook will be retrieved from the local Databricks workspace. When set to GIT, the notebook will be retrieved from a Git repository defined in git_source. If the value is empty, the task will use GIT if git_source is defined and WORKSPACE otherwise.
warehouseId string
ID of the SQL warehouse (databricks_sql_endpoint) that will be used to execute the task with a SQL notebook.
notebook_path This property is required. str
The path of the databricks.Notebook to be run in the Databricks workspace or remote repository. For notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash. For notebooks stored in a remote repository, the path must be relative. This field is required.
base_parameters Mapping[str, str]
(Map) Base parameters to be used for each run of this job. If the run is initiated by a call to run-now with parameters specified, the two parameters maps will be merged. If the same key is specified in base_parameters and in run-now, the value from run-now will be used. If the notebook takes a parameter that is not specified in the job’s base_parameters or the run-now override parameters, the default value from the notebook will be used. Retrieve these parameters in a notebook using dbutils.widgets.get.
source str
Location type of the notebook, can only be WORKSPACE or GIT. When set to WORKSPACE, the notebook will be retrieved from the local Databricks workspace. When set to GIT, the notebook will be retrieved from a Git repository defined in git_source. If the value is empty, the task will use GIT if git_source is defined and WORKSPACE otherwise.
warehouse_id str
ID of the SQL warehouse (databricks_sql_endpoint) that will be used to execute the task with a SQL notebook.
notebookPath This property is required. String
The path of the databricks.Notebook to be run in the Databricks workspace or remote repository. For notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash. For notebooks stored in a remote repository, the path must be relative. This field is required.
baseParameters Map<String>
(Map) Base parameters to be used for each run of this job. If the run is initiated by a call to run-now with parameters specified, the two parameters maps will be merged. If the same key is specified in base_parameters and in run-now, the value from run-now will be used. If the notebook takes a parameter that is not specified in the job’s base_parameters or the run-now override parameters, the default value from the notebook will be used. Retrieve these parameters in a notebook using dbutils.widgets.get.
source String
Location type of the notebook, can only be WORKSPACE or GIT. When set to WORKSPACE, the notebook will be retrieved from the local Databricks workspace. When set to GIT, the notebook will be retrieved from a Git repository defined in git_source. If the value is empty, the task will use GIT if git_source is defined and WORKSPACE otherwise.
warehouseId String
ID of the SQL warehouse (databricks_sql_endpoint) that will be used to execute the task with a SQL notebook.
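
A rough TypeScript sketch of a notebook task with base parameters (the notebook path and parameter values are placeholders, not values from this reference; base parameters can be overridden per run via run-now, as described above):

import * as databricks from "@pulumi/databricks";

// Workspace notebook paths must be absolute and begin with a slash.
const notebookExample = new databricks.Job("notebook-task-example", {
    name: "Notebook job",
    tasks: [{
        taskKey: "report",
        notebookTask: {
            notebookPath: "/Shared/reports/daily",  // placeholder workspace path
            baseParameters: {
                env: "prod",            // placeholder parameter
                date: "2025-03-13",     // placeholder parameter
            },
            source: "WORKSPACE",
        },
    }],
});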

JobNotificationSettings
, JobNotificationSettingsArgs

NoAlertForCanceledRuns bool

(Bool) don't send alert for cancelled runs.

The following parameter is only available on task level.

NoAlertForSkippedRuns bool
(Bool) don't send alert for skipped runs.
NoAlertForCanceledRuns bool

(Bool) don't send alert for cancelled runs.

The following parameter is only available on task level.

NoAlertForSkippedRuns bool
(Bool) don't send alert for skipped runs.
noAlertForCanceledRuns Boolean

(Bool) don't send alert for cancelled runs.

The following parameter is only available on task level.

noAlertForSkippedRuns Boolean
(Bool) don't send alert for skipped runs.
noAlertForCanceledRuns boolean

(Bool) don't send alert for cancelled runs.

The following parameter is only available on task level.

noAlertForSkippedRuns boolean
(Bool) don't send alert for skipped runs.
no_alert_for_canceled_runs bool

(Bool) don't send alert for cancelled runs.

The following parameter is only available on task level.

no_alert_for_skipped_runs bool
(Bool) don't send alert for skipped runs.
noAlertForCanceledRuns Boolean

(Bool) don't send alert for cancelled runs.

The following parameter is only available on task level.

noAlertForSkippedRuns Boolean
(Bool) don't send alert for skipped runs.
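
A rough TypeScript sketch of job-level notification settings (the notebook path is a placeholder, not a value from this reference):

import * as databricks from "@pulumi/databricks";

// Suppress alerts for cancelled and skipped runs at the job level.
const notificationExample = new databricks.Job("notification-example", {
    name: "Job with notification settings",
    notificationSettings: {
        noAlertForCanceledRuns: true,
        noAlertForSkippedRuns: true,
    },
    tasks: [{
        taskKey: "main",
        notebookTask: { notebookPath: "/Shared/main" },  // placeholder workspace path
    }],
});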

JobParameter
, JobParameterArgs

Default This property is required. string

Default value of the parameter.

You can use this block only together with task blocks, not with the legacy tasks specification!

Name This property is required. string
The name of the defined parameter. May only contain alphanumeric characters, _, -, and ..
Default This property is required. string

Default value of the parameter.

You can use this block only together with task blocks, not with the legacy tasks specification!

Name This property is required. string
The name of the defined parameter. May only contain alphanumeric characters, _, -, and ..
default_ This property is required. String

Default value of the parameter.

You can use this block only together with task blocks, not with the legacy tasks specification!

name This property is required. String
The name of the defined parameter. May only contain alphanumeric characters, _, -, and ..
default This property is required. string

Default value of the parameter.

You can use this block only together with task blocks, not with the legacy tasks specification!

name This property is required. string
The name of the defined parameter. May only contain alphanumeric characters, _, -, and ..
default This property is required. str

Default value of the parameter.

You can use this block only together with task blocks, not with the legacy tasks specification!

name This property is required. str
The name of the defined parameter. May only contain alphanumeric characters, _, -, and ..
default This property is required. String

Default value of the parameter.

You can use this block only together with task blocks, not with the legacy tasks specification!

name This property is required. String
The name of the defined parameter. May only contain alphanumeric characters, _, -, and ..
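
A rough TypeScript sketch of job-level parameters, which, as noted above, can only be used together with task blocks (the parameter names, defaults, and notebook path are placeholders, not values from this reference):

import * as databricks from "@pulumi/databricks";

// Parameter names and defaults are illustrative only.
const parameterExample = new databricks.Job("parameter-example", {
    name: "Parameterized job",
    parameters: [
        { name: "environment", default: "dev" },
        { name: "retention-days", default: "30" },
    ],
    tasks: [{
        taskKey: "main",
        notebookTask: { notebookPath: "/Shared/main" },  // placeholder workspace path
    }],
});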

JobPipelineTask
, JobPipelineTaskArgs

PipelineId This property is required. string
The pipeline's unique ID.
FullRefresh bool

(Bool) Specifies if there should be full refresh of the pipeline.

The following configuration blocks are only supported inside a task block

PipelineId This property is required. string
The pipeline's unique ID.
FullRefresh bool

(Bool) Specifies if there should be full refresh of the pipeline.

The following configuration blocks are only supported inside a task block

pipelineId This property is required. String
The pipeline's unique ID.
fullRefresh Boolean

(Bool) Specifies if there should be full refresh of the pipeline.

The following configuration blocks are only supported inside a task block

pipelineId This property is required. string
The pipeline's unique ID.
fullRefresh boolean

(Bool) Specifies if there should be full refresh of the pipeline.

The following configuration blocks are only supported inside a task block

pipeline_id This property is required. str
The pipeline's unique ID.
full_refresh bool

(Bool) Specifies if there should be full refresh of the pipeline.

The following configuration blocks are only supported inside a task block

pipelineId This property is required. String
The pipeline's unique ID.
fullRefresh Boolean

(Bool) Specifies if there should be full refresh of the pipeline.

The following configuration blocks are only supported inside a task block

JobPythonWheelTask
, JobPythonWheelTaskArgs

EntryPoint string
Python function as entry point for the task
NamedParameters Dictionary<string, string>
Named parameters for the task
PackageName string
Name of Python package
Parameters List<string>
Parameters for the task
EntryPoint string
Python function as entry point for the task
NamedParameters map[string]string
Named parameters for the task
PackageName string
Name of Python package
Parameters []string
Parameters for the task
entryPoint String
Python function as entry point for the task
namedParameters Map<String,String>
Named parameters for the task
packageName String
Name of Python package
parameters List<String>
Parameters for the task
entryPoint string
Python function as entry point for the task
namedParameters {[key: string]: string}
Named parameters for the task
packageName string
Name of Python package
parameters string[]
Parameters for the task
entry_point str
Python function as entry point for the task
named_parameters Mapping[str, str]
Named parameters for the task
package_name str
Name of Python package
parameters Sequence[str]
Parameters for the task
entryPoint String
Python function as entry point for the task
namedParameters Map<String>
Named parameters for the task
packageName String
Name of Python package
parameters List<String>
Parameters for the task

JobQueue
, JobQueueArgs

Enabled This property is required. bool
If true, enable queueing for the job.
Enabled This property is required. bool
If true, enable queueing for the job.
enabled This property is required. Boolean
If true, enable queueing for the job.
enabled This property is required. boolean
If true, enable queueing for the job.
enabled This property is required. bool
If true, enable queueing for the job.
enabled This property is required. Boolean
If true, enable queueing for the job.
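
A rough TypeScript sketch of enabling queueing (the notebook path is a placeholder, not a value from this reference):

import * as databricks from "@pulumi/databricks";

// If true, enable queueing for the job (see the description above).
const queueExample = new databricks.Job("queue-example", {
    name: "Queued job",
    queue: { enabled: true },
    tasks: [{
        taskKey: "main",
        notebookTask: { notebookPath: "/Shared/main" },  // placeholder workspace path
    }],
});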

JobRunAs
, JobRunAsArgs

ServicePrincipalName string

The application ID of an active service principal. Setting this field requires the servicePrincipal/user role.

Example:

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

const _this = new databricks.Job("this", {runAs: { servicePrincipalName: "8d23ae77-912e-4a19-81e4-b9c3f5cc9349", }});

import pulumi
import pulumi_databricks as databricks

this = databricks.Job("this", run_as={
    "service_principal_name": "8d23ae77-912e-4a19-81e4-b9c3f5cc9349",
})
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;

return await Deployment.RunAsync(() => 
{
    var @this = new Databricks.Job("this", new()
    {
        RunAs = new Databricks.Inputs.JobRunAsArgs
        {
            ServicePrincipalName = "8d23ae77-912e-4a19-81e4-b9c3f5cc9349",
        },
    });

});
package main

import (
	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := databricks.NewJob(ctx, "this", &databricks.JobArgs{
			RunAs: &databricks.JobRunAsArgs{
				ServicePrincipalName: pulumi.String("8d23ae77-912e-4a19-81e4-b9c3f5cc9349"),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.Job;
import com.pulumi.databricks.JobArgs;
import com.pulumi.databricks.inputs.JobRunAsArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var this_ = new Job("this", JobArgs.builder()
            .runAs(JobRunAsArgs.builder()
                .servicePrincipalName("8d23ae77-912e-4a19-81e4-b9c3f5cc9349")
                .build())
            .build());

    }
}
resources:
  this:
    type: databricks:Job
    properties:
      runAs:
        servicePrincipalName: 8d23ae77-912e-4a19-81e4-b9c3f5cc9349
UserName string
The email of an active workspace user. Non-admin users can only set this field to their own email.
ServicePrincipalName string

The application ID of an active service principal. Setting this field requires the servicePrincipal/user role.

UserName string
The email of an active workspace user. Non-admin users can only set this field to their own email.
servicePrincipalName String

The application ID of an active service principal. Setting this field requires the servicePrincipal/user role.

userName String
The email of an active workspace user. Non-admin users can only set this field to their own email.
servicePrincipalName string

The application ID of an active service principal. Setting this field requires the servicePrincipal/user role.

userName string
The email of an active workspace user. Non-admin users can only set this field to their own email.
service_principal_name str

The application ID of an active service principal. Setting this field requires the servicePrincipal/user role.

user_name str
The email of an active workspace user. Non-admin users can only set this field to their own email.
servicePrincipalName String

The application ID of an active service principal. Setting this field requires the servicePrincipal/user role.

userName String
The email of an active workspace user. Non-admin users can only set this field to their own email.

JobRunJobTask
, JobRunJobTaskArgs

JobId This property is required. int
(Integer) ID of the job to run.
JobParameters Dictionary<string, string>
(Map) Job parameters for the task
JobId This property is required. int
(Integer) ID of the job to run.
JobParameters map[string]string
(Map) Job parameters for the task
jobId This property is required. Integer
(Integer) ID of the job to run.
jobParameters Map<String,String>
(Map) Job parameters for the task
jobId This property is required. number
(Integer) ID of the job to run.
jobParameters {[key: string]: string}
(Map) Job parameters for the task
job_id This property is required. int
(Integer) ID of the job to run.
job_parameters Mapping[str, str]
(Map) Job parameters for the task
jobId This property is required. Number
(Integer) ID of the job to run.
jobParameters Map<String>
(Map) Job parameters for the task
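
A rough TypeScript sketch of triggering another job from a task (the downstream job and its parameters are placeholders, not values from this reference; the conversion assumes the resource id of a databricks.Job is its numeric job ID returned as a string):

import * as databricks from "@pulumi/databricks";

// A downstream job defined elsewhere in the program (placeholder definition).
const downstream = new databricks.Job("downstream", {
    name: "Downstream job",
    tasks: [{ taskKey: "main", notebookTask: { notebookPath: "/Shared/downstream" } }],  // placeholder path
});

const runJobExample = new databricks.Job("run-job-example", {
    name: "Orchestrator job",
    tasks: [{
        taskKey: "trigger-downstream",
        runJobTask: {
            jobId: downstream.id.apply(id => parseInt(id, 10)),  // job_id is an integer
            jobParameters: { environment: "dev" },               // placeholder parameters
        },
    }],
});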

JobSchedule
, JobScheduleArgs

QuartzCronExpression This property is required. string
A Cron expression using Quartz syntax that describes the schedule for a job. This field is required.
TimezoneId This property is required. string
A Java timezone ID. The schedule for a job will be resolved with respect to this timezone. See Java TimeZone for details. This field is required.
PauseStatus string
Indicate whether this schedule is paused or not. Either PAUSED or UNPAUSED. When the pause_status field is omitted and a schedule is provided, the server will default to using UNPAUSED as a value for pause_status.
QuartzCronExpression This property is required. string
A Cron expression using Quartz syntax that describes the schedule for a job. This field is required.
TimezoneId This property is required. string
A Java timezone ID. The schedule for a job will be resolved with respect to this timezone. See Java TimeZone for details. This field is required.
PauseStatus string
Indicate whether this schedule is paused or not. Either PAUSED or UNPAUSED. When the pause_status field is omitted and a schedule is provided, the server will default to using UNPAUSED as a value for pause_status.
quartzCronExpression This property is required. String
A Cron expression using Quartz syntax that describes the schedule for a job. This field is required.
timezoneId This property is required. String
A Java timezone ID. The schedule for a job will be resolved with respect to this timezone. See Java TimeZone for details. This field is required.
pauseStatus String
Indicate whether this schedule is paused or not. Either PAUSED or UNPAUSED. When the pause_status field is omitted and a schedule is provided, the server will default to using UNPAUSED as a value for pause_status.
quartzCronExpression This property is required. string
A Cron expression using Quartz syntax that describes the schedule for a job. This field is required.
timezoneId This property is required. string
A Java timezone ID. The schedule for a job will be resolved with respect to this timezone. See Java TimeZone for details. This field is required.
pauseStatus string
Indicate whether this schedule is paused or not. Either PAUSED or UNPAUSED. When the pause_status field is omitted and a schedule is provided, the server will default to using UNPAUSED as a value for pause_status.
quartz_cron_expression This property is required. str
A Cron expression using Quartz syntax that describes the schedule for a job. This field is required.
timezone_id This property is required. str
A Java timezone ID. The schedule for a job will be resolved with respect to this timezone. See Java TimeZone for details. This field is required.
pause_status str
Indicate whether this schedule is paused or not. Either PAUSED or UNPAUSED. When the pause_status field is omitted and a schedule is provided, the server will default to using UNPAUSED as a value for pause_status.
quartzCronExpression This property is required. String
A Cron expression using Quartz syntax that describes the schedule for a job. This field is required.
timezoneId This property is required. String
A Java timezone ID. The schedule for a job will be resolved with respect to this timezone. See Java TimeZone for details. This field is required.
pauseStatus String
Indicate whether this schedule is paused or not. Either PAUSED or UNPAUSED. When the pause_status field is omitted and a schedule is provided, the server will default to using UNPAUSED as a value for pause_status.
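
A rough TypeScript sketch of a cron schedule (the cron expression, timezone, and notebook path are placeholders, not values from this reference):

import * as databricks from "@pulumi/databricks";

// Quartz cron: second 0, minute 30, hour 2, every day.
const scheduleExample = new databricks.Job("schedule-example", {
    name: "Scheduled job",
    schedule: {
        quartzCronExpression: "0 30 2 * * ?",
        timezoneId: "UTC",
        pauseStatus: "UNPAUSED",
    },
    tasks: [{
        taskKey: "main",
        notebookTask: { notebookPath: "/Shared/main" },  // placeholder workspace path
    }],
});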

JobSparkJarTask
, JobSparkJarTaskArgs

JarUri string
MainClassName string
The full name of the class containing the main method to be executed. This class must be contained in a JAR provided as a library. The code should use SparkContext.getOrCreate to obtain a Spark context; otherwise, runs of the job will fail.
Parameters List<string>
(List) Parameters passed to the main method.
JarUri string
MainClassName string
The full name of the class containing the main method to be executed. This class must be contained in a JAR provided as a library. The code should use SparkContext.getOrCreate to obtain a Spark context; otherwise, runs of the job will fail.
Parameters []string
(List) Parameters passed to the main method.
jarUri String
mainClassName String
The full name of the class containing the main method to be executed. This class must be contained in a JAR provided as a library. The code should use SparkContext.getOrCreate to obtain a Spark context; otherwise, runs of the job will fail.
parameters List<String>
(List) Parameters passed to the main method.
jarUri string
mainClassName string
The full name of the class containing the main method to be executed. This class must be contained in a JAR provided as a library. The code should use SparkContext.getOrCreate to obtain a Spark context; otherwise, runs of the job will fail.
parameters string[]
(List) Parameters passed to the main method.
jar_uri str
main_class_name str
The full name of the class containing the main method to be executed. This class must be contained in a JAR provided as a library. The code should use SparkContext.getOrCreate to obtain a Spark context; otherwise, runs of the job will fail.
parameters Sequence[str]
(List) Parameters passed to the main method.
jarUri String
mainClassName String
The full name of the class containing the main method to be executed. This class must be contained in a JAR provided as a library. The code should use SparkContext.getOrCreate to obtain a Spark context; otherwise, runs of the job will fail.
parameters List<String>
(List) Parameters passed to the main method.
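
A rough TypeScript sketch of a Spark JAR task (the main class, JAR path, Spark version, and node type are placeholders, not values from this reference; the jar library field used to attach the JAR is not documented on this page and is shown only as an assumption):

import * as databricks from "@pulumi/databricks";

// The main class must be contained in a JAR attached as a library.
const jarExample = new databricks.Job("spark-jar-example", {
    name: "Spark JAR job",
    tasks: [{
        taskKey: "main",
        newCluster: {
            sparkVersion: "15.4.x-scala2.12",  // assumed runtime label
            nodeTypeId: "i3.xlarge",           // assumed node type
            numWorkers: 2,
        },
        libraries: [{ jar: "/Volumes/main/default/jars/acme-data.jar" }],  // placeholder path
        sparkJarTask: {
            mainClassName: "com.acme.data.Main",      // placeholder class name
            parameters: ["--date", "2025-03-13"],     // placeholder parameters
        },
    }],
});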

JobSparkPythonTask
, JobSparkPythonTaskArgs

PythonFile This property is required. string
The URI of the Python file to be executed. databricks_dbfs_file, cloud file URIs (e.g. s3:/, abfss:/, gs:/), workspace paths and remote repository are supported. For Python files stored in the Databricks workspace, the path must be absolute and begin with /Repos. For files stored in a remote repository, the path must be relative. This field is required.
Parameters List<string>
(List) Command line parameters passed to the Python file.
Source string
Location type of the Python file, can only be GIT. When set to GIT, the Python file will be retrieved from a Git repository defined in git_source.
PythonFile This property is required. string
The URI of the Python file to be executed. databricks_dbfs_file, cloud file URIs (e.g. s3:/, abfss:/, gs:/), workspace paths and remote repository are supported. For Python files stored in the Databricks workspace, the path must be absolute and begin with /Repos. For files stored in a remote repository, the path must be relative. This field is required.
Parameters []string
(List) Command line parameters passed to the Python file.
Source string
Location type of the Python file, can only be GIT. When set to GIT, the Python file will be retrieved from a Git repository defined in git_source.
pythonFile This property is required. String
The URI of the Python file to be executed. databricks_dbfs_file, cloud file URIs (e.g. s3:/, abfss:/, gs:/), workspace paths and remote repository are supported. For Python files stored in the Databricks workspace, the path must be absolute and begin with /Repos. For files stored in a remote repository, the path must be relative. This field is required.
parameters List<String>
(List) Command line parameters passed to the Python file.
source String
Location type of the Python file, can only be GIT. When set to GIT, the Python file will be retrieved from a Git repository defined in git_source.
pythonFile This property is required. string
The URI of the Python file to be executed. databricks_dbfs_file, cloud file URIs (e.g. s3:/, abfss:/, gs:/), workspace paths and remote repository are supported. For Python files stored in the Databricks workspace, the path must be absolute and begin with /Repos. For files stored in a remote repository, the path must be relative. This field is required.
parameters string[]
(List) Command line parameters passed to the Python file.
source string
Location type of the Python file, can only be GIT. When set to GIT, the Python file will be retrieved from a Git repository defined in git_source.
python_file This property is required. str
The URI of the Python file to be executed. databricks_dbfs_file, cloud file URIs (e.g. s3:/, abfss:/, gs:/), workspace paths and remote repository are supported. For Python files stored in the Databricks workspace, the path must be absolute and begin with /Repos. For files stored in a remote repository, the path must be relative. This field is required.
parameters Sequence[str]
(List) Command line parameters passed to the Python file.
source str
Location type of the Python file, can only be GIT. When set to GIT, the Python file will be retrieved from a Git repository defined in git_source.
pythonFile This property is required. String
The URI of the Python file to be executed. databricks_dbfs_file, cloud file URIs (e.g. s3:/, abfss:/, gs:/), workspace paths and remote repository are supported. For Python files stored in the Databricks workspace, the path must be absolute and begin with /Repos. For files stored in a remote repository, the path must be relative. This field is required.
parameters List<String>
(List) Command line parameters passed to the Python file.
source String
Location type of the Python file, can only be GIT. When set to GIT, the Python file will be retrieved from a Git repository defined in git_source.
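
A rough TypeScript sketch of a Spark Python task referencing a cloud file URI (the file URI, parameters, Spark version, and node type are placeholders, not values from this reference):

import * as databricks from "@pulumi/databricks";

// The Python file is read from a cloud file URI; workspace paths would need to be absolute.
const pyExample = new databricks.Job("spark-python-example", {
    name: "Spark Python job",
    tasks: [{
        taskKey: "main",
        newCluster: {
            sparkVersion: "15.4.x-scala2.12",  // assumed runtime label
            nodeTypeId: "i3.xlarge",           // assumed node type
            numWorkers: 1,
        },
        sparkPythonTask: {
            pythonFile: "s3://my-bucket/scripts/etl.py",  // placeholder cloud file URI
            parameters: ["--table", "events"],            // placeholder parameters
        },
    }],
});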

JobSparkSubmitTask
, JobSparkSubmitTaskArgs

Parameters List<string>
(List) Command-line parameters passed to spark submit.
Parameters []string
(List) Command-line parameters passed to spark submit.
parameters List<String>
(List) Command-line parameters passed to spark submit.
parameters string[]
(List) Command-line parameters passed to spark submit.
parameters Sequence[str]
(List) Command-line parameters passed to spark submit.
parameters List<String>
(List) Command-line parameters passed to spark submit.
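
A rough TypeScript sketch of a spark-submit task (the class name, DBFS path, Spark version, and node type are placeholders, not values from this reference):

import * as databricks from "@pulumi/databricks";

// Raw spark-submit arguments are passed through as-is.
const submitExample = new databricks.Job("spark-submit-example", {
    name: "Spark submit job",
    tasks: [{
        taskKey: "main",
        newCluster: {
            sparkVersion: "15.4.x-scala2.12",  // assumed runtime label
            nodeTypeId: "i3.xlarge",           // assumed node type
            numWorkers: 1,
        },
        sparkSubmitTask: {
            parameters: [
                "--class", "com.acme.data.Main",        // placeholder class name
                "dbfs:/FileStore/jars/acme-data.jar",   // placeholder JAR path
            ],
        },
    }],
});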

JobTask
, JobTaskArgs

TaskKey This property is required. string
string specifying a unique key for a given task.

  • *_task - (Required) one of the specific task blocks described below:
CleanRoomsNotebookTask JobTaskCleanRoomsNotebookTask
ConditionTask JobTaskConditionTask
DbtTask JobTaskDbtTask
DependsOns List<JobTaskDependsOn>
block specifying one or more dependencies for a given task.
Description string
description for this task.
DisableAutoOptimization bool
A flag to disable auto optimization in serverless tasks.
EmailNotifications JobTaskEmailNotifications
An optional block to specify a set of email addresses notified when this task begins, completes or fails. The default behavior is to not send any emails. This block is documented below.
EnvironmentKey string
identifier of an environment block that is used to specify libraries. Required for some tasks (spark_python_task, python_wheel_task, ...) running on serverless compute.
ExistingClusterId string
Identifier of the interactive cluster to run the job on. Note: running tasks on interactive clusters may lead to increased costs!
ForEachTask JobTaskForEachTask
GenAiComputeTask JobTaskGenAiComputeTask
Health JobTaskHealth
block described below that specifies health conditions for a given task.
JobClusterKey string
Identifier of the Job cluster specified in the job_cluster block.
Libraries List<JobTaskLibrary>
(Set) An optional list of libraries to be installed on the cluster that will execute the job.
MaxRetries int
(Integer) An optional maximum number of times to retry an unsuccessful run. A run is considered to be unsuccessful if it completes with a FAILED or INTERNAL_ERROR lifecycle state. The value -1 means to retry indefinitely and the value 0 means to never retry. The default behavior is to never retry. A run can have the following lifecycle state: PENDING, RUNNING, TERMINATING, TERMINATED, SKIPPED or INTERNAL_ERROR.
MinRetryIntervalMillis int
(Integer) An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried.
NewCluster JobTaskNewCluster
The task will run on a dedicated cluster. See the databricks.Cluster documentation for the specification. Some parameters, such as autotermination_minutes, is_pinned, and workload_type, aren't supported!
NotebookTask JobTaskNotebookTask
NotificationSettings JobTaskNotificationSettings
An optional block controlling the notification settings for this task, documented below.
PipelineTask JobTaskPipelineTask
PythonWheelTask JobTaskPythonWheelTask
RetryOnTimeout bool
(Bool) An optional policy to specify whether to retry a job when it times out. The default behavior is to not retry on timeout.
RunIf string
An optional value indicating the condition that determines whether the task should be run once its dependencies have been completed. One of ALL_SUCCESS, AT_LEAST_ONE_SUCCESS, NONE_FAILED, ALL_DONE, AT_LEAST_ONE_FAILED or ALL_FAILED. When omitted, defaults to ALL_SUCCESS.
RunJobTask JobTaskRunJobTask
SparkJarTask JobTaskSparkJarTask
SparkPythonTask JobTaskSparkPythonTask
SparkSubmitTask JobTaskSparkSubmitTask
SqlTask JobTaskSqlTask
TimeoutSeconds int
(Integer) An optional timeout applied to each run of this job. The default behavior is to have no timeout.
WebhookNotifications JobTaskWebhookNotifications

(List) An optional set of system destinations (for example, webhook destinations or Slack) to be notified when runs of this task begin, complete, or fail. The default behavior is to not send any notifications. This field is a block and is documented below.

If no job_cluster_key, existing_cluster_id, or new_cluster were specified in task definition, then task will executed using serverless compute.

TaskKey This property is required. string
string specifying a unique key for a given task.

  • *_task - (Required) one of the specific task blocks described below:
CleanRoomsNotebookTask JobTaskCleanRoomsNotebookTask
ConditionTask JobTaskConditionTask
DbtTask JobTaskDbtTask
DependsOns []JobTaskDependsOn
block specifying dependency(-ies) for a given task.
Description string
description for this task.
DisableAutoOptimization bool
A flag to disable auto optimization in serverless tasks.
EmailNotifications JobTaskEmailNotifications
An optional block to specify a set of email addresses notified when this task begins, completes or fails. The default behavior is to not send any emails. This block is documented below.
EnvironmentKey string
identifier of an environment block that is used to specify libraries. Required for some tasks (spark_python_task, python_wheel_task, ...) running on serverless compute.
ExistingClusterId string
Identifier of the interactive cluster to run the job on. Note: running tasks on interactive clusters may lead to increased costs!
ForEachTask JobTaskForEachTask
GenAiComputeTask JobTaskGenAiComputeTask
Health JobTaskHealth
block described below that specifies health conditions for a given task.
JobClusterKey string
Identifier of the Job cluster specified in the job_cluster block.
Libraries []JobTaskLibrary
(Set) An optional list of libraries to be installed on the cluster that will execute the job.
MaxRetries int
(Integer) An optional maximum number of times to retry an unsuccessful run. A run is considered to be unsuccessful if it completes with a FAILED or INTERNAL_ERROR lifecycle state. The value -1 means to retry indefinitely and the value 0 means to never retry. The default behavior is to never retry. A run can have the following lifecycle states: PENDING, RUNNING, TERMINATING, TERMINATED, SKIPPED or INTERNAL_ERROR.
MinRetryIntervalMillis int
(Integer) An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried.
NewCluster JobTaskNewCluster
Task will run on a dedicated cluster. See databricks.Cluster documentation for specification. Some parameters, such as autotermination_minutes, is_pinned, and workload_type, aren't supported!
NotebookTask JobTaskNotebookTask
NotificationSettings JobTaskNotificationSettings
An optional block controlling the notification settings on the job level documented below.
PipelineTask JobTaskPipelineTask
PythonWheelTask JobTaskPythonWheelTask
RetryOnTimeout bool
(Bool) An optional policy to specify whether to retry a job when it times out. The default behavior is to not retry on timeout.
RunIf string
An optional value indicating the condition that determines whether the task should be run once its dependencies have been completed. One of ALL_SUCCESS, AT_LEAST_ONE_SUCCESS, NONE_FAILED, ALL_DONE, AT_LEAST_ONE_FAILED or ALL_FAILED. When omitted, defaults to ALL_SUCCESS.
RunJobTask JobTaskRunJobTask
SparkJarTask JobTaskSparkJarTask
SparkPythonTask JobTaskSparkPythonTask
SparkSubmitTask JobTaskSparkSubmitTask
SqlTask JobTaskSqlTask
TimeoutSeconds int
(Integer) An optional timeout applied to each run of this job. The default behavior is to have no timeout.
WebhookNotifications JobTaskWebhookNotifications

(List) An optional set of system destinations (for example, webhook destinations or Slack) to be notified when runs of this task begin, complete, or fail. The default behavior is to not send any notifications. This field is a block and is documented below.

If no job_cluster_key, existing_cluster_id, or new_cluster is specified in the task definition, the task will be executed using serverless compute.

taskKey This property is required. String
string specifying a unique key for a given task.

  • *_task - (Required) one of the specific task blocks described below:
cleanRoomsNotebookTask JobTaskCleanRoomsNotebookTask
conditionTask JobTaskConditionTask
dbtTask JobTaskDbtTask
dependsOns List<JobTaskDependsOn>
block specifying dependency(-ies) for a given task.
description String
description for this task.
disableAutoOptimization Boolean
A flag to disable auto optimization in serverless tasks.
emailNotifications JobTaskEmailNotifications
An optional block to specify a set of email addresses notified when this task begins, completes or fails. The default behavior is to not send any emails. This block is documented below.
environmentKey String
identifier of an environment block that is used to specify libraries. Required for some tasks (spark_python_task, python_wheel_task, ...) running on serverless compute.
existingClusterId String
Identifier of the interactive cluster to run the job on. Note: running tasks on interactive clusters may lead to increased costs!
forEachTask JobTaskForEachTask
genAiComputeTask JobTaskGenAiComputeTask
health JobTaskHealth
block described below that specifies health conditions for a given task.
jobClusterKey String
Identifier of the Job cluster specified in the job_cluster block.
libraries List<JobTaskLibrary>
(Set) An optional list of libraries to be installed on the cluster that will execute the job.
maxRetries Integer
(Integer) An optional maximum number of times to retry an unsuccessful run. A run is considered to be unsuccessful if it completes with a FAILED or INTERNAL_ERROR lifecycle state. The value -1 means to retry indefinitely and the value 0 means to never retry. The default behavior is to never retry. A run can have the following lifecycle states: PENDING, RUNNING, TERMINATING, TERMINATED, SKIPPED or INTERNAL_ERROR.
minRetryIntervalMillis Integer
(Integer) An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried.
newCluster JobTaskNewCluster
Task will run on a dedicated cluster. See databricks.Cluster documentation for specification. Some parameters, such as autotermination_minutes, is_pinned, and workload_type, aren't supported!
notebookTask JobTaskNotebookTask
notificationSettings JobTaskNotificationSettings
An optional block controlling the notification settings on the job level documented below.
pipelineTask JobTaskPipelineTask
pythonWheelTask JobTaskPythonWheelTask
retryOnTimeout Boolean
(Bool) An optional policy to specify whether to retry a job when it times out. The default behavior is to not retry on timeout.
runIf String
An optional value indicating the condition that determines whether the task should be run once its dependencies have been completed. One of ALL_SUCCESS, AT_LEAST_ONE_SUCCESS, NONE_FAILED, ALL_DONE, AT_LEAST_ONE_FAILED or ALL_FAILED. When omitted, defaults to ALL_SUCCESS.
runJobTask JobTaskRunJobTask
sparkJarTask JobTaskSparkJarTask
sparkPythonTask JobTaskSparkPythonTask
sparkSubmitTask JobTaskSparkSubmitTask
sqlTask JobTaskSqlTask
timeoutSeconds Integer
(Integer) An optional timeout applied to each run of this job. The default behavior is to have no timeout.
webhookNotifications JobTaskWebhookNotifications

(List) An optional set of system destinations (for example, webhook destinations or Slack) to be notified when runs of this task begin, complete, or fail. The default behavior is to not send any notifications. This field is a block and is documented below.

If no job_cluster_key, existing_cluster_id, or new_cluster is specified in the task definition, the task will be executed using serverless compute.

taskKey This property is required. string
string specifying a unique key for a given task.

  • *_task - (Required) one of the specific task blocks described below:
cleanRoomsNotebookTask JobTaskCleanRoomsNotebookTask
conditionTask JobTaskConditionTask
dbtTask JobTaskDbtTask
dependsOns JobTaskDependsOn[]
block specifying dependency(-ies) for a given task.
description string
description for this task.
disableAutoOptimization boolean
A flag to disable auto optimization in serverless tasks.
emailNotifications JobTaskEmailNotifications
An optional block to specify a set of email addresses notified when this task begins, completes or fails. The default behavior is to not send any emails. This block is documented below.
environmentKey string
identifier of an environment block that is used to specify libraries. Required for some tasks (spark_python_task, python_wheel_task, ...) running on serverless compute.
existingClusterId string
Identifier of the interactive cluster to run the job on. Note: running tasks on interactive clusters may lead to increased costs!
forEachTask JobTaskForEachTask
genAiComputeTask JobTaskGenAiComputeTask
health JobTaskHealth
block described below that specifies health conditions for a given task.
jobClusterKey string
Identifier of the Job cluster specified in the job_cluster block.
libraries JobTaskLibrary[]
(Set) An optional list of libraries to be installed on the cluster that will execute the job.
maxRetries number
(Integer) An optional maximum number of times to retry an unsuccessful run. A run is considered to be unsuccessful if it completes with a FAILED or INTERNAL_ERROR lifecycle state. The value -1 means to retry indefinitely and the value 0 means to never retry. The default behavior is to never retry. A run can have the following lifecycle states: PENDING, RUNNING, TERMINATING, TERMINATED, SKIPPED or INTERNAL_ERROR.
minRetryIntervalMillis number
(Integer) An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried.
newCluster JobTaskNewCluster
Task will run on a dedicated cluster. See databricks.Cluster documentation for specification. Some parameters, such as autotermination_minutes, is_pinned, and workload_type, aren't supported!
notebookTask JobTaskNotebookTask
notificationSettings JobTaskNotificationSettings
An optional block controlling the notification settings on the job level documented below.
pipelineTask JobTaskPipelineTask
pythonWheelTask JobTaskPythonWheelTask
retryOnTimeout boolean
(Bool) An optional policy to specify whether to retry a job when it times out. The default behavior is to not retry on timeout.
runIf string
An optional value indicating the condition that determines whether the task should be run once its dependencies have been completed. One of ALL_SUCCESS, AT_LEAST_ONE_SUCCESS, NONE_FAILED, ALL_DONE, AT_LEAST_ONE_FAILED or ALL_FAILED. When omitted, defaults to ALL_SUCCESS.
runJobTask JobTaskRunJobTask
sparkJarTask JobTaskSparkJarTask
sparkPythonTask JobTaskSparkPythonTask
sparkSubmitTask JobTaskSparkSubmitTask
sqlTask JobTaskSqlTask
timeoutSeconds number
(Integer) An optional timeout applied to each run of this job. The default behavior is to have no timeout.
webhookNotifications JobTaskWebhookNotifications

(List) An optional set of system destinations (for example, webhook destinations or Slack) to be notified when runs of this task begin, complete, or fail. The default behavior is to not send any notifications. This field is a block and is documented below.

If no job_cluster_key, existing_cluster_id, or new_cluster is specified in the task definition, the task will be executed using serverless compute.

task_key This property is required. str
string specifying a unique key for a given task.

  • *_task - (Required) one of the specific task blocks described below:
clean_rooms_notebook_task JobTaskCleanRoomsNotebookTask
condition_task JobTaskConditionTask
dbt_task JobTaskDbtTask
depends_ons Sequence[JobTaskDependsOn]
block specifying dependency(-ies) for a given task.
description str
description for this task.
disable_auto_optimization bool
A flag to disable auto optimization in serverless tasks.
email_notifications JobTaskEmailNotifications
An optional block to specify a set of email addresses notified when this task begins, completes or fails. The default behavior is to not send any emails. This block is documented below.
environment_key str
identifier of an environment block that is used to specify libraries. Required for some tasks (spark_python_task, python_wheel_task, ...) running on serverless compute.
existing_cluster_id str
Identifier of the interactive cluster to run the job on. Note: running tasks on interactive clusters may lead to increased costs!
for_each_task JobTaskForEachTask
gen_ai_compute_task JobTaskGenAiComputeTask
health JobTaskHealth
block described below that specifies health conditions for a given task.
job_cluster_key str
Identifier of the Job cluster specified in the job_cluster block.
libraries Sequence[JobTaskLibrary]
(Set) An optional list of libraries to be installed on the cluster that will execute the job.
max_retries int
(Integer) An optional maximum number of times to retry an unsuccessful run. A run is considered to be unsuccessful if it completes with a FAILED or INTERNAL_ERROR lifecycle state. The value -1 means to retry indefinitely and the value 0 means to never retry. The default behavior is to never retry. A run can have the following lifecycle states: PENDING, RUNNING, TERMINATING, TERMINATED, SKIPPED or INTERNAL_ERROR.
min_retry_interval_millis int
(Integer) An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried.
new_cluster JobTaskNewCluster
Task will run on a dedicated cluster. See databricks.Cluster documentation for specification. Some parameters, such as autotermination_minutes, is_pinned, and workload_type, aren't supported!
notebook_task JobTaskNotebookTask
notification_settings JobTaskNotificationSettings
An optional block controlling the notification settings on the job level documented below.
pipeline_task JobTaskPipelineTask
python_wheel_task JobTaskPythonWheelTask
retry_on_timeout bool
(Bool) An optional policy to specify whether to retry a job when it times out. The default behavior is to not retry on timeout.
run_if str
An optional value indicating the condition that determines whether the task should be run once its dependencies have been completed. One of ALL_SUCCESS, AT_LEAST_ONE_SUCCESS, NONE_FAILED, ALL_DONE, AT_LEAST_ONE_FAILED or ALL_FAILED. When omitted, defaults to ALL_SUCCESS.
run_job_task JobTaskRunJobTask
spark_jar_task JobTaskSparkJarTask
spark_python_task JobTaskSparkPythonTask
spark_submit_task JobTaskSparkSubmitTask
sql_task JobTaskSqlTask
timeout_seconds int
(Integer) An optional timeout applied to each run of this job. The default behavior is to have no timeout.
webhook_notifications JobTaskWebhookNotifications

(List) An optional set of system destinations (for example, webhook destinations or Slack) to be notified when runs of this task begin, complete, or fail. The default behavior is to not send any notifications. This field is a block and is documented below.

If no job_cluster_key, existing_cluster_id, or new_cluster is specified in the task definition, the task will be executed using serverless compute.

taskKey This property is required. String
string specifying a unique key for a given task.

  • *_task - (Required) one of the specific task blocks described below:
cleanRoomsNotebookTask Property Map
conditionTask Property Map
dbtTask Property Map
dependsOns List<Property Map>
block specifying dependency(-ies) for a given task.
description String
description for this task.
disableAutoOptimization Boolean
A flag to disable auto optimization in serverless tasks.
emailNotifications Property Map
An optional block to specify a set of email addresses notified when this task begins, completes or fails. The default behavior is to not send any emails. This block is documented below.
environmentKey String
identifier of an environment block that is used to specify libraries. Required for some tasks (spark_python_task, python_wheel_task, ...) running on serverless compute.
existingClusterId String
Identifier of the interactive cluster to run the job on. Note: running tasks on interactive clusters may lead to increased costs!
forEachTask Property Map
genAiComputeTask Property Map
health Property Map
block described below that specifies health conditions for a given task.
jobClusterKey String
Identifier of the Job cluster specified in the job_cluster block.
libraries List<Property Map>
(Set) An optional list of libraries to be installed on the cluster that will execute the job.
maxRetries Number
(Integer) An optional maximum number of times to retry an unsuccessful run. A run is considered to be unsuccessful if it completes with a FAILED or INTERNAL_ERROR lifecycle state. The value -1 means to retry indefinitely and the value 0 means to never retry. The default behavior is to never retry. A run can have the following lifecycle states: PENDING, RUNNING, TERMINATING, TERMINATED, SKIPPED or INTERNAL_ERROR.
minRetryIntervalMillis Number
(Integer) An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried.
newCluster Property Map
Task will run on a dedicated cluster. See databricks.Cluster documentation for specification. Some parameters, such as autotermination_minutes, is_pinned, and workload_type, aren't supported!
notebookTask Property Map
notificationSettings Property Map
An optional block controlling the notification settings on the job level documented below.
pipelineTask Property Map
pythonWheelTask Property Map
retryOnTimeout Boolean
(Bool) An optional policy to specify whether to retry a job when it times out. The default behavior is to not retry on timeout.
runIf String
An optional value indicating the condition that determines whether the task should be run once its dependencies have been completed. One of ALL_SUCCESS, AT_LEAST_ONE_SUCCESS, NONE_FAILED, ALL_DONE, AT_LEAST_ONE_FAILED or ALL_FAILED. When omitted, defaults to ALL_SUCCESS.
runJobTask Property Map
sparkJarTask Property Map
sparkPythonTask Property Map
sparkSubmitTask Property Map
sqlTask Property Map
timeoutSeconds Number
(Integer) An optional timeout applied to each run of this job. The default behavior is to have no timeout.
webhookNotifications Property Map

(List) An optional set of system destinations (for example, webhook destinations or Slack) to be notified when runs of this task begin, complete, or fail. The default behavior is to not send any notifications. This field is a block and is documented below.

If no job_cluster_key, existing_cluster_id, or new_cluster is specified in the task definition, the task will be executed using serverless compute.
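As an illustration of the serverless fallback described above, the following minimal sketch defines a task with none of job_cluster_key, existing_cluster_id, or new_cluster, so it would run on serverless compute. The job name and notebook path are placeholders, not values from this page.

import * as databricks from "@pulumi/databricks";

// A task that specifies no jobClusterKey, existingClusterId, or newCluster,
// so it is executed on serverless compute.
const serverlessExample = new databricks.Job("serverless-example", {
    name: "Serverless notebook job",
    tasks: [{
        taskKey: "main",
        notebookTask: {
            notebookPath: "/Workspace/Shared/example", // placeholder path
        },
    }],
});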

JobTaskCleanRoomsNotebookTask, JobTaskCleanRoomsNotebookTaskArgs

CleanRoomName This property is required. string
The clean room that the notebook belongs to.
NotebookName This property is required. string
Name of the notebook being run.
Etag string
Checksum to validate the freshness of the notebook resource.
NotebookBaseParameters Dictionary<string, string>
Base parameters to be used for the clean room notebook job.
CleanRoomName This property is required. string
The clean room that the notebook belongs to.
NotebookName This property is required. string
Name of the notebook being run.
Etag string
Checksum to validate the freshness of the notebook resource.
NotebookBaseParameters map[string]string
Base parameters to be used for the clean room notebook job.
cleanRoomName This property is required. String
The clean room that the notebook belongs to.
notebookName This property is required. String
Name of the notebook being run.
etag String
Checksum to validate the freshness of the notebook resource.
notebookBaseParameters Map<String,String>
Base parameters to be used for the clean room notebook job.
cleanRoomName This property is required. string
The clean room that the notebook belongs to.
notebookName This property is required. string
Name of the notebook being run.
etag string
Checksum to validate the freshness of the notebook resource.
notebookBaseParameters {[key: string]: string}
Base parameters to be used for the clean room notebook job.
clean_room_name This property is required. str
The clean room that the notebook belongs to.
notebook_name This property is required. str
Name of the notebook being run.
etag str
Checksum to validate the freshness of the notebook resource.
notebook_base_parameters Mapping[str, str]
Base parameters to be used for the clean room notebook job.
cleanRoomName This property is required. String
The clean room that the notebook belongs to.
notebookName This property is required. String
Name of the notebook being run.
etag String
Checksum to validate the freshness of the notebook resource.
notebookBaseParameters Map<String>
Base parameters to be used for the clean room notebook job.
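A minimal sketch of a clean-room notebook task using only the properties listed above; the clean room name, notebook name, and parameter values are hypothetical.

import * as databricks from "@pulumi/databricks";

// Runs a notebook that belongs to a clean room, passing base parameters.
const cleanRoomExample = new databricks.Job("clean-room-example", {
    name: "Clean room notebook job",
    tasks: [{
        taskKey: "analysis",
        cleanRoomsNotebookTask: {
            cleanRoomName: "my_clean_room",   // hypothetical clean room
            notebookName: "shared_analysis",  // hypothetical notebook
            notebookBaseParameters: {
                run_date: "2025-03-13",
            },
        },
    }],
});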

JobTaskConditionTask, JobTaskConditionTaskArgs

Left This property is required. string
The left operand of the condition task. It could be a string value, job state, or a parameter reference.
Op This property is required. string

The string specifying the operation used to compare operands. Currently, the following operators are supported: EQUAL_TO, GREATER_THAN, GREATER_THAN_OR_EQUAL, LESS_THAN, LESS_THAN_OR_EQUAL, NOT_EQUAL. (Check the API docs for the latest information).

This task does not require a cluster to execute and does not support retries or notifications.

Right This property is required. string
The right operand of the condition task. It could be a string value, job state, or parameter reference.
Left This property is required. string
The left operand of the condition task. It could be a string value, job state, or a parameter reference.
Op This property is required. string

The string specifying the operation used to compare operands. Currently, the following operators are supported: EQUAL_TO, GREATER_THAN, GREATER_THAN_OR_EQUAL, LESS_THAN, LESS_THAN_OR_EQUAL, NOT_EQUAL. (Check the API docs for the latest information).

This task does not require a cluster to execute and does not support retries or notifications.

Right This property is required. string
The right operand of the condition task. It could be a string value, job state, or parameter reference.
left This property is required. String
The left operand of the condition task. It could be a string value, job state, or a parameter reference.
op This property is required. String

The string specifying the operation used to compare operands. Currently, the following operators are supported: EQUAL_TO, GREATER_THAN, GREATER_THAN_OR_EQUAL, LESS_THAN, LESS_THAN_OR_EQUAL, NOT_EQUAL. (Check the API docs for the latest information).

This task does not require a cluster to execute and does not support retries or notifications.

right This property is required. String
The right operand of the condition task. It could be a string value, job state, or parameter reference.
left This property is required. string
The left operand of the condition task. It could be a string value, job state, or a parameter reference.
op This property is required. string

The string specifying the operation used to compare operands. Currently, the following operators are supported: EQUAL_TO, GREATER_THAN, GREATER_THAN_OR_EQUAL, LESS_THAN, LESS_THAN_OR_EQUAL, NOT_EQUAL. (Check the API docs for the latest information).

This task does not require a cluster to execute and does not support retries or notifications.

right This property is required. string
The right operand of the condition task. It could be a string value, job state, or parameter reference.
left This property is required. str
The left operand of the condition task. It could be a string value, job state, or a parameter reference.
op This property is required. str

The string specifying the operation used to compare operands. Currently, the following operators are supported: EQUAL_TO, GREATER_THAN, GREATER_THAN_OR_EQUAL, LESS_THAN, LESS_THAN_OR_EQUAL, NOT_EQUAL. (Check the API docs for the latest information).

This task does not require a cluster to execute and does not support retries or notifications.

right This property is required. str
The right operand of the condition task. It could be a string value, job state, or parameter reference.
left This property is required. String
The left operand of the condition task. It could be a string value, job state, or a parameter reference.
op This property is required. String

The string specifying the operation used to compare operands. Currently, the following operators are supported: EQUAL_TO, GREATER_THAN, GREATER_THAN_OR_EQUAL, LESS_THAN, LESS_THAN_OR_EQUAL, NOT_EQUAL. (Check the API docs for the latest information).

This task does not require a cluster to execute and does not support retries or notifications.

right This property is required. String
The right operand of the condition task. It could be a string value, job state, or parameter reference.
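The sketch below ties a condition task to a downstream task via the outcome field of depends_on (documented further down). The parameter reference syntax and notebook path are illustrative assumptions, not values from this page.

import * as databricks from "@pulumi/databricks";

// "check" compares a (hypothetical) job parameter against zero; "notify"
// runs only when that condition evaluates to "true".
const conditionalExample = new databricks.Job("conditional-example", {
    name: "Conditional job",
    tasks: [
        {
            taskKey: "check",
            conditionTask: {
                left: "{{job.parameters.row_count}}", // assumed parameter reference syntax
                op: "GREATER_THAN",
                right: "0",
            },
        },
        {
            taskKey: "notify",
            dependsOns: [{
                taskKey: "check",
                outcome: "true", // only run if the condition was true
            }],
            notebookTask: {
                notebookPath: "/Workspace/Shared/notify", // placeholder path
            },
        },
    ],
});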

JobTaskDbtTask, JobTaskDbtTaskArgs

Commands This property is required. List<string>
(Array) Series of dbt commands to execute in sequence. Every command must start with "dbt".
Catalog string
The name of the catalog to use inside Unity Catalog.
ProfilesDirectory string
The relative path to the directory in the repository specified by git_source where dbt should look for the profiles.yml file. If not specified, defaults to the repository's root directory. Equivalent to passing --profile-dir to a dbt command.
ProjectDirectory string
The path where dbt should look for dbt_project.yml. Equivalent to passing --project-dir to the dbt CLI.

  • If source is GIT: Relative path to the directory in the repository specified in the git_source block. Defaults to the repository's root directory when not specified.
  • If source is WORKSPACE: Absolute path to the folder in the workspace.
Schema string
The name of the schema dbt should run in. Defaults to default.
Source string
The source of the project. Possible values are WORKSPACE and GIT. Defaults to GIT if a git_source block is present in the job definition.
WarehouseId string

The ID of the SQL warehouse that dbt should execute against.

You also need to include a git_source block to configure the repository that contains the dbt project.

Commands This property is required. []string
(Array) Series of dbt commands to execute in sequence. Every command must start with "dbt".
Catalog string
The name of the catalog to use inside Unity Catalog.
ProfilesDirectory string
The relative path to the directory in the repository specified by git_source where dbt should look for the profiles.yml file. If not specified, defaults to the repository's root directory. Equivalent to passing --profile-dir to a dbt command.
ProjectDirectory string
The path where dbt should look for dbt_project.yml. Equivalent to passing --project-dir to the dbt CLI.

  • If source is GIT: Relative path to the directory in the repository specified in the git_source block. Defaults to the repository's root directory when not specified.
  • If source is WORKSPACE: Absolute path to the folder in the workspace.
Schema string
The name of the schema dbt should run in. Defaults to default.
Source string
The source of the project. Possible values are WORKSPACE and GIT. Defaults to GIT if a git_source block is present in the job definition.
WarehouseId string

The ID of the SQL warehouse that dbt should execute against.

You also need to include a git_source block to configure the repository that contains the dbt project.

commands This property is required. List<String>
(Array) Series of dbt commands to execute in sequence. Every command must start with "dbt".
catalog String
The name of the catalog to use inside Unity Catalog.
profilesDirectory String
The relative path to the directory in the repository specified by git_source where dbt should look for the profiles.yml file. If not specified, defaults to the repository's root directory. Equivalent to passing --profile-dir to a dbt command.
projectDirectory String
The path where dbt should look for dbt_project.yml. Equivalent to passing --project-dir to the dbt CLI.

  • If source is GIT: Relative path to the directory in the repository specified in the git_source block. Defaults to the repository's root directory when not specified.
  • If source is WORKSPACE: Absolute path to the folder in the workspace.
schema String
The name of the schema dbt should run in. Defaults to default.
source String
The source of the project. Possible values are WORKSPACE and GIT. Defaults to GIT if a git_source block is present in the job definition.
warehouseId String

The ID of the SQL warehouse that dbt should execute against.

You also need to include a git_source block to configure the repository that contains the dbt project.

commands This property is required. string[]
(Array) Series of dbt commands to execute in sequence. Every command must start with "dbt".
catalog string
The name of the catalog to use inside Unity Catalog.
profilesDirectory string
The relative path to the directory in the repository specified by git_source where dbt should look for the profiles.yml file. If not specified, defaults to the repository's root directory. Equivalent to passing --profile-dir to a dbt command.
projectDirectory string
The path where dbt should look for dbt_project.yml. Equivalent to passing --project-dir to the dbt CLI.

  • If source is GIT: Relative path to the directory in the repository specified in the git_source block. Defaults to the repository's root directory when not specified.
  • If source is WORKSPACE: Absolute path to the folder in the workspace.
schema string
The name of the schema dbt should run in. Defaults to default.
source string
The source of the project. Possible values are WORKSPACE and GIT. Defaults to GIT if a git_source block is present in the job definition.
warehouseId string

The ID of the SQL warehouse that dbt should execute against.

You also need to include a git_source block to configure the repository that contains the dbt project.

commands This property is required. Sequence[str]
(Array) Series of dbt commands to execute in sequence. Every command must start with "dbt".
catalog str
The name of the catalog to use inside Unity Catalog.
profiles_directory str
The relative path to the directory in the repository specified by git_source where dbt should look for the profiles.yml file. If not specified, defaults to the repository's root directory. Equivalent to passing --profile-dir to a dbt command.
project_directory str
The path where dbt should look for dbt_project.yml. Equivalent to passing --project-dir to the dbt CLI.

  • If source is GIT: Relative path to the directory in the repository specified in the git_source block. Defaults to the repository's root directory when not specified.
  • If source is WORKSPACE: Absolute path to the folder in the workspace.
schema str
The name of the schema dbt should run in. Defaults to default.
source str
The source of the project. Possible values are WORKSPACE and GIT. Defaults to GIT if a git_source block is present in the job definition.
warehouse_id str

The ID of the SQL warehouse that dbt should execute against.

You also need to include a git_source block to configure the repository that contains the dbt project.

commands This property is required. List<String>
(Array) Series of dbt commands to execute in sequence. Every command must start with "dbt".
catalog String
The name of the catalog to use inside Unity Catalog.
profilesDirectory String
The relative path to the directory in the repository specified by git_source where dbt should look for the profiles.yml file. If not specified, defaults to the repository's root directory. Equivalent to passing --profile-dir to a dbt command.
projectDirectory String
The path where dbt should look for dbt_project.yml. Equivalent to passing --project-dir to the dbt CLI.

  • If source is GIT: Relative path to the directory in the repository specified in the git_source block. Defaults to the repository's root directory when not specified.
  • If source is WORKSPACE: Absolute path to the folder in the workspace.
schema String
The name of the schema dbt should run in. Defaults to default.
source String
The source of the project. Possible values are WORKSPACE and GIT. Defaults to GIT if a git_source block is present in the job definition.
warehouseId String

The ID of the SQL warehouse that dbt should execute against.

You also need to include a git_source block to configure the repository that contains the dbt project.
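A hedged sketch of a dbt task sourced from Git, combining the dbt task fields above with a job-level git_source block as the note suggests. The repository URL, warehouse ID, and schema are placeholders, and any libraries or environment required to run dbt are omitted.

import * as databricks from "@pulumi/databricks";

// dbt project pulled from a Git repository and executed against a SQL warehouse.
const dbtExample = new databricks.Job("dbt-example", {
    name: "dbt job",
    gitSource: {
        url: "https://github.com/example/dbt-project", // placeholder repository
        provider: "gitHub",
        branch: "main",
    },
    tasks: [{
        taskKey: "dbt",
        dbtTask: {
            commands: ["dbt deps", "dbt run"], // every command must start with "dbt"
            warehouseId: "0123456789abcdef",   // placeholder SQL warehouse ID
            schema: "analytics",
        },
    }],
});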

JobTaskDependsOn, JobTaskDependsOnArgs

TaskKey This property is required. string
The name of the task this task depends on.
Outcome string

Can only be specified on condition task dependencies. The outcome of the dependent task that must be met for this task to run. Possible values are "true" or "false".

Similar to the tasks themselves, each dependency inside the task needs to be declared in alphabetical order with respect to task_key in order to get consistent Pulumi diffs.

TaskKey This property is required. string
The name of the task this task depends on.
Outcome string

Can only be specified on condition task dependencies. The outcome of the dependent task that must be met for this task to run. Possible values are "true" or "false".

Similar to the tasks themselves, each dependency inside the task needs to be declared in alphabetical order with respect to task_key in order to get consistent Pulumi diffs.

taskKey This property is required. String
The name of the task this task depends on.
outcome String

Can only be specified on condition task dependencies. The outcome of the dependent task that must be met for this task to run. Possible values are "true" or "false".

Similar to the tasks themselves, each dependency inside the task needs to be declared in alphabetical order with respect to task_key in order to get consistent Pulumi diffs.

taskKey This property is required. string
The name of the task this task depends on.
outcome string

Can only be specified on condition task dependencies. The outcome of the dependent task that must be met for this task to run. Possible values are "true" or "false".

Similar to the tasks themselves, each dependency inside the task needs to be declared in alphabetical order with respect to task_key in order to get consistent Pulumi diffs.

task_key This property is required. str
The name of the task this task depends on.
outcome str

Can only be specified on condition task dependencies. The outcome of the dependent task that must be met for this task to run. Possible values are "true" or "false".

Similar to the tasks themselves, each dependency inside the task needs to be declared in alphabetical order with respect to task_key in order to get consistent Pulumi diffs.

taskKey This property is required. String
The name of the task this task depends on.
outcome String

Can only be specified on condition task dependencies. The outcome of the dependent task that must be met for this task to run. Possible values are "true" or "false".

Similar to the tasks themselves, each dependency inside the task needs to be declared in alphabetical order with respect to task_key in order to get consistent Pulumi diffs.

JobTaskEmailNotifications, JobTaskEmailNotificationsArgs

NoAlertForSkippedRuns bool
(Bool) don't send alerts for skipped runs. (It's recommended to use the corresponding setting in the notification_settings configuration block).
OnDurationWarningThresholdExceededs List<string>

(List) list of emails to notify when the duration of a run exceeds the threshold specified by the RUN_DURATION_SECONDS metric in the health block.

The following parameter is only available for the job level configuration.

OnFailures List<string>
(List) list of emails to notify when the run fails.
OnStarts List<string>
(List) list of emails to notify when the run starts.
OnStreamingBacklogExceededs List<string>
OnSuccesses List<string>
(List) list of emails to notify when the run completes successfully.
NoAlertForSkippedRuns bool
(Bool) don't send alerts for skipped runs. (It's recommended to use the corresponding setting in the notification_settings configuration block).
OnDurationWarningThresholdExceededs []string

(List) list of emails to notify when the duration of a run exceeds the threshold specified by the RUN_DURATION_SECONDS metric in the health block.

The following parameter is only available for the job level configuration.

OnFailures []string
(List) list of emails to notify when the run fails.
OnStarts []string
(List) list of emails to notify when the run starts.
OnStreamingBacklogExceededs []string
OnSuccesses []string
(List) list of emails to notify when the run completes successfully.
noAlertForSkippedRuns Boolean
(Bool) don't send alerts for skipped runs. (It's recommended to use the corresponding setting in the notification_settings configuration block).
onDurationWarningThresholdExceededs List<String>

(List) list of emails to notify when the duration of a run exceeds the threshold specified by the RUN_DURATION_SECONDS metric in the health block.

The following parameter is only available for the job level configuration.

onFailures List<String>
(List) list of emails to notify when the run fails.
onStarts List<String>
(List) list of emails to notify when the run starts.
onStreamingBacklogExceededs List<String>
onSuccesses List<String>
(List) list of emails to notify when the run completes successfully.
noAlertForSkippedRuns boolean
(Bool) don't send alerts for skipped runs. (It's recommended to use the corresponding setting in the notification_settings configuration block).
onDurationWarningThresholdExceededs string[]

(List) list of emails to notify when the duration of a run exceeds the threshold specified by the RUN_DURATION_SECONDS metric in the health block.

The following parameter is only available for the job level configuration.

onFailures string[]
(List) list of emails to notify when the run fails.
onStarts string[]
(List) list of emails to notify when the run starts.
onStreamingBacklogExceededs string[]
onSuccesses string[]
(List) list of emails to notify when the run completes successfully.
no_alert_for_skipped_runs bool
(Bool) don't send alerts for skipped runs. (It's recommended to use the corresponding setting in the notification_settings configuration block).
on_duration_warning_threshold_exceededs Sequence[str]

(List) list of emails to notify when the duration of a run exceeds the threshold specified by the RUN_DURATION_SECONDS metric in the health block.

The following parameter is only available for the job level configuration.

on_failures Sequence[str]
(List) list of emails to notify when the run fails.
on_starts Sequence[str]
(List) list of emails to notify when the run starts.
on_streaming_backlog_exceededs Sequence[str]
on_successes Sequence[str]
(List) list of emails to notify when the run completes successfully.
noAlertForSkippedRuns Boolean
(Bool) don't send alerts for skipped runs. (It's recommended to use the corresponding setting in the notification_settings configuration block).
onDurationWarningThresholdExceededs List<String>

(List) list of emails to notify when the duration of a run exceeds the threshold specified by the RUN_DURATION_SECONDS metric in the health block.

The following parameter is only available for the job level configuration.

onFailures List<String>
(List) list of emails to notify when the run fails.
onStarts List<String>
(List) list of emails to notify when the run starts.
onStreamingBacklogExceededs List<String>
onSuccesses List<String>
(List) list of emails to notify when the run completes successfully.
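A sketch of how the task-level email notification block and the RUN_DURATION_SECONDS metric mentioned above fit together. Addresses, the notebook path, and the threshold are placeholders, and the health rule structure is assumed from the health block documented elsewhere on this page.

import * as databricks from "@pulumi/databricks";

const notifiedExample = new databricks.Job("notified-example", {
    name: "Job with task email notifications",
    tasks: [{
        taskKey: "main",
        notebookTask: {
            notebookPath: "/Workspace/Shared/main", // placeholder path
        },
        // Warn by email when the run exceeds the duration threshold below,
        // and alert on failure.
        health: {
            rules: [{
                metric: "RUN_DURATION_SECONDS",
                op: "GREATER_THAN",
                value: 3600, // assumed threshold in seconds
            }],
        },
        emailNotifications: {
            onDurationWarningThresholdExceededs: ["oncall@example.com"],
            onFailures: ["oncall@example.com"],
        },
    }],
});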

JobTaskForEachTask, JobTaskForEachTaskArgs

Inputs This property is required. string
(String) Array for the task to iterate on. This can be a JSON string or a reference to an array parameter.
Task This property is required. JobTaskForEachTaskTask
Task to run against the inputs list.
Concurrency int
Controls the number of active iteration task runs. Default is 20, maximum allowed is 100.
Inputs This property is required. string
(String) Array for the task to iterate on. This can be a JSON string or a reference to an array parameter.
Task This property is required. JobTaskForEachTaskTask
Task to run against the inputs list.
Concurrency int
Controls the number of active iteration task runs. Default is 20, maximum allowed is 100.
inputs This property is required. String
(String) Array for the task to iterate on. This can be a JSON string or a reference to an array parameter.
task This property is required. JobTaskForEachTaskTask
Task to run against the inputs list.
concurrency Integer
Controls the number of active iteration task runs. Default is 20, maximum allowed is 100.
inputs This property is required. string
(String) Array for the task to iterate on. This can be a JSON string or a reference to an array parameter.
task This property is required. JobTaskForEachTaskTask
Task to run against the inputs list.
concurrency number
Controls the number of active iteration task runs. Default is 20, maximum allowed is 100.
inputs This property is required. str
(String) Array for the task to iterate on. This can be a JSON string or a reference to an array parameter.
task This property is required. JobTaskForEachTaskTask
Task to run against the inputs list.
concurrency int
Controls the number of active iteration task runs. Default is 20, maximum allowed is 100.
inputs This property is required. String
(String) Array for the task to iterate on. This can be a JSON string or a reference to an array parameter.
task This property is required. Property Map
Task to run against the inputs list.
concurrency Number
Controls the number of active iteration task runs. Default is 20, maximum allowed is 100.
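A minimal sketch of a for-each fan-out using the fields above; the {{input}} iteration reference and the notebook base parameters are assumptions for illustration.

import * as databricks from "@pulumi/databricks";

// Iterates over a JSON array, running at most two iterations concurrently.
const fanOutExample = new databricks.Job("foreach-example", {
    name: "Fan-out job",
    tasks: [{
        taskKey: "process_all",
        forEachTask: {
            inputs: JSON.stringify(["alpha", "beta", "gamma"]),
            concurrency: 2,
            task: {
                taskKey: "process_one",
                notebookTask: {
                    notebookPath: "/Workspace/Shared/process", // placeholder path
                    baseParameters: {
                        item: "{{input}}", // assumed reference to the current iteration value
                    },
                },
            },
        },
    }],
});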

JobTaskForEachTaskTask, JobTaskForEachTaskTaskArgs

TaskKey This property is required. string
string specifying a unique key for a given task.

  • *_task - (Required) one of the specific task blocks described below:
CleanRoomsNotebookTask JobTaskForEachTaskTaskCleanRoomsNotebookTask
ConditionTask JobTaskForEachTaskTaskConditionTask
DbtTask JobTaskForEachTaskTaskDbtTask
DependsOns List<JobTaskForEachTaskTaskDependsOn>
block specifying dependency(-ies) for a given task.
Description string
description for this task.
DisableAutoOptimization bool
A flag to disable auto optimization in serverless tasks.
EmailNotifications JobTaskForEachTaskTaskEmailNotifications
An optional block to specify a set of email addresses notified when this task begins, completes or fails. The default behavior is to not send any emails. This block is documented below.
EnvironmentKey string
identifier of an environment block that is used to specify libraries. Required for some tasks (spark_python_task, python_wheel_task, ...) running on serverless compute.
ExistingClusterId string
Identifier of the interactive cluster to run the job on. Note: running tasks on interactive clusters may lead to increased costs!
GenAiComputeTask JobTaskForEachTaskTaskGenAiComputeTask
Health JobTaskForEachTaskTaskHealth
block described below that specifies health conditions for a given task.
JobClusterKey string
Identifier of the Job cluster specified in the job_cluster block.
Libraries List<JobTaskForEachTaskTaskLibrary>
(Set) An optional list of libraries to be installed on the cluster that will execute the job.
MaxRetries int
(Integer) An optional maximum number of times to retry an unsuccessful run. A run is considered to be unsuccessful if it completes with a FAILED or INTERNAL_ERROR lifecycle state. The value -1 means to retry indefinitely and the value 0 means to never retry. The default behavior is to never retry. A run can have the following lifecycle states: PENDING, RUNNING, TERMINATING, TERMINATED, SKIPPED or INTERNAL_ERROR.
MinRetryIntervalMillis int
(Integer) An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried.
NewCluster JobTaskForEachTaskTaskNewCluster
Task will run on a dedicated cluster. See databricks.Cluster documentation for specification. Some parameters, such as autotermination_minutes, is_pinned, and workload_type, aren't supported!
NotebookTask JobTaskForEachTaskTaskNotebookTask
NotificationSettings JobTaskForEachTaskTaskNotificationSettings
An optional block controlling the notification settings on the job level documented below.
PipelineTask JobTaskForEachTaskTaskPipelineTask
PythonWheelTask JobTaskForEachTaskTaskPythonWheelTask
RetryOnTimeout bool
(Bool) An optional policy to specify whether to retry a job when it times out. The default behavior is to not retry on timeout.
RunIf string
An optional value indicating the condition that determines whether the task should be run once its dependencies have been completed. One of ALL_SUCCESS, AT_LEAST_ONE_SUCCESS, NONE_FAILED, ALL_DONE, AT_LEAST_ONE_FAILED or ALL_FAILED. When omitted, defaults to ALL_SUCCESS.
RunJobTask JobTaskForEachTaskTaskRunJobTask
SparkJarTask JobTaskForEachTaskTaskSparkJarTask
SparkPythonTask JobTaskForEachTaskTaskSparkPythonTask
SparkSubmitTask JobTaskForEachTaskTaskSparkSubmitTask
SqlTask JobTaskForEachTaskTaskSqlTask
TimeoutSeconds int
(Integer) An optional timeout applied to each run of this job. The default behavior is to have no timeout.
WebhookNotifications JobTaskForEachTaskTaskWebhookNotifications

(List) An optional set of system destinations (for example, webhook destinations or Slack) to be notified when runs of this task begin, complete, or fail. The default behavior is to not send any notifications. This field is a block and is documented below.

If no job_cluster_key, existing_cluster_id, or new_cluster is specified in the task definition, the task will be executed using serverless compute.

TaskKey This property is required. string
string specifying a unique key for a given task.

  • *_task - (Required) one of the specific task blocks described below:
CleanRoomsNotebookTask JobTaskForEachTaskTaskCleanRoomsNotebookTask
ConditionTask JobTaskForEachTaskTaskConditionTask
DbtTask JobTaskForEachTaskTaskDbtTask
DependsOns []JobTaskForEachTaskTaskDependsOn
block specifying dependency(-ies) for a given task.
Description string
description for this task.
DisableAutoOptimization bool
A flag to disable auto optimization in serverless tasks.
EmailNotifications JobTaskForEachTaskTaskEmailNotifications
An optional block to specify a set of email addresses notified when this task begins, completes or fails. The default behavior is to not send any emails. This block is documented below.
EnvironmentKey string
identifier of an environment block that is used to specify libraries. Required for some tasks (spark_python_task, python_wheel_task, ...) running on serverless compute.
ExistingClusterId string
Identifier of the interactive cluster to run the job on. Note: running tasks on interactive clusters may lead to increased costs!
GenAiComputeTask JobTaskForEachTaskTaskGenAiComputeTask
Health JobTaskForEachTaskTaskHealth
block described below that specifies health conditions for a given task.
JobClusterKey string
Identifier of the Job cluster specified in the job_cluster block.
Libraries []JobTaskForEachTaskTaskLibrary
(Set) An optional list of libraries to be installed on the cluster that will execute the job.
MaxRetries int
(Integer) An optional maximum number of times to retry an unsuccessful run. A run is considered to be unsuccessful if it completes with a FAILED or INTERNAL_ERROR lifecycle state. The value -1 means to retry indefinitely and the value 0 means to never retry. The default behavior is to never retry. A run can have the following lifecycle states: PENDING, RUNNING, TERMINATING, TERMINATED, SKIPPED or INTERNAL_ERROR.
MinRetryIntervalMillis int
(Integer) An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried.
NewCluster JobTaskForEachTaskTaskNewCluster
Task will run on a dedicated cluster. See databricks.Cluster documentation for specification. Some parameters, such as autotermination_minutes, is_pinned, and workload_type, aren't supported!
NotebookTask JobTaskForEachTaskTaskNotebookTask
NotificationSettings JobTaskForEachTaskTaskNotificationSettings
An optional block controlling the notification settings on the job level documented below.
PipelineTask JobTaskForEachTaskTaskPipelineTask
PythonWheelTask JobTaskForEachTaskTaskPythonWheelTask
RetryOnTimeout bool
(Bool) An optional policy to specify whether to retry a job when it times out. The default behavior is to not retry on timeout.
RunIf string
An optional value indicating the condition that determines whether the task should be run once its dependencies have been completed. One of ALL_SUCCESS, AT_LEAST_ONE_SUCCESS, NONE_FAILED, ALL_DONE, AT_LEAST_ONE_FAILED or ALL_FAILED. When omitted, defaults to ALL_SUCCESS.
RunJobTask JobTaskForEachTaskTaskRunJobTask
SparkJarTask JobTaskForEachTaskTaskSparkJarTask
SparkPythonTask JobTaskForEachTaskTaskSparkPythonTask
SparkSubmitTask JobTaskForEachTaskTaskSparkSubmitTask
SqlTask JobTaskForEachTaskTaskSqlTask
TimeoutSeconds int
(Integer) An optional timeout applied to each run of this job. The default behavior is to have no timeout.
WebhookNotifications JobTaskForEachTaskTaskWebhookNotifications

(List) An optional set of system destinations (for example, webhook destinations or Slack) to be notified when runs of this task begin, complete, or fail. The default behavior is to not send any notifications. This field is a block and is documented below.

If no job_cluster_key, existing_cluster_id, or new_cluster is specified in the task definition, the task will be executed using serverless compute.

taskKey This property is required. String
string specifying a unique key for a given task.

  • *_task - (Required) one of the specific task blocks described below:
cleanRoomsNotebookTask JobTaskForEachTaskTaskCleanRoomsNotebookTask
conditionTask JobTaskForEachTaskTaskConditionTask
dbtTask JobTaskForEachTaskTaskDbtTask
dependsOns List<JobTaskForEachTaskTaskDependsOn>
block specifying dependency(-ies) for a given task.
description String
description for this task.
disableAutoOptimization Boolean
A flag to disable auto optimization in serverless tasks.
emailNotifications JobTaskForEachTaskTaskEmailNotifications
An optional block to specify a set of email addresses notified when this task begins, completes or fails. The default behavior is to not send any emails. This block is documented below.
environmentKey String
identifier of an environment block that is used to specify libraries. Required for some tasks (spark_python_task, python_wheel_task, ...) running on serverless compute.
existingClusterId String
Identifier of the interactive cluster to run the job on. Note: running tasks on interactive clusters may lead to increased costs!
genAiComputeTask JobTaskForEachTaskTaskGenAiComputeTask
health JobTaskForEachTaskTaskHealth
block described below that specifies health conditions for a given task.
jobClusterKey String
Identifier of the Job cluster specified in the job_cluster block.
libraries List<JobTaskForEachTaskTaskLibrary>
(Set) An optional list of libraries to be installed on the cluster that will execute the job.
maxRetries Integer
(Integer) An optional maximum number of times to retry an unsuccessful run. A run is considered to be unsuccessful if it completes with a FAILED or INTERNAL_ERROR lifecycle state. The value -1 means to retry indefinitely and the value 0 means to never retry. The default behavior is to never retry. A run can have the following lifecycle states: PENDING, RUNNING, TERMINATING, TERMINATED, SKIPPED or INTERNAL_ERROR.
minRetryIntervalMillis Integer
(Integer) An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried.
newCluster JobTaskForEachTaskTaskNewCluster
Task will run on a dedicated cluster. See databricks.Cluster documentation for specification. Some parameters, such as autotermination_minutes, is_pinned, and workload_type, aren't supported!
notebookTask JobTaskForEachTaskTaskNotebookTask
notificationSettings JobTaskForEachTaskTaskNotificationSettings
An optional block controlling the notification settings on the job level documented below.
pipelineTask JobTaskForEachTaskTaskPipelineTask
pythonWheelTask JobTaskForEachTaskTaskPythonWheelTask
retryOnTimeout Boolean
(Bool) An optional policy to specify whether to retry a job when it times out. The default behavior is to not retry on timeout.
runIf String
An optional value indicating the condition that determines whether the task should be run once its dependencies have been completed. One of ALL_SUCCESS, AT_LEAST_ONE_SUCCESS, NONE_FAILED, ALL_DONE, AT_LEAST_ONE_FAILED or ALL_FAILED. When omitted, defaults to ALL_SUCCESS.
runJobTask JobTaskForEachTaskTaskRunJobTask
sparkJarTask JobTaskForEachTaskTaskSparkJarTask
sparkPythonTask JobTaskForEachTaskTaskSparkPythonTask
sparkSubmitTask JobTaskForEachTaskTaskSparkSubmitTask
sqlTask JobTaskForEachTaskTaskSqlTask
timeoutSeconds Integer
(Integer) An optional timeout applied to each run of this job. The default behavior is to have no timeout.
webhookNotifications JobTaskForEachTaskTaskWebhookNotifications

(List) An optional set of system destinations (for example, webhook destinations or Slack) to be notified when runs of this task begin, complete, or fail. The default behavior is to not send any notifications. This field is a block and is documented below.

If no job_cluster_key, existing_cluster_id, or new_cluster is specified in the task definition, the task will be executed using serverless compute.

taskKey This property is required. string
string specifying a unique key for a given task.

  • *_task - (Required) one of the specific task blocks described below:
cleanRoomsNotebookTask JobTaskForEachTaskTaskCleanRoomsNotebookTask
conditionTask JobTaskForEachTaskTaskConditionTask
dbtTask JobTaskForEachTaskTaskDbtTask
dependsOns JobTaskForEachTaskTaskDependsOn[]
block specifying dependency(-ies) for a given task.
description string
description for this task.
disableAutoOptimization boolean
A flag to disable auto optimization in serverless tasks.
emailNotifications JobTaskForEachTaskTaskEmailNotifications
An optional block to specify a set of email addresses notified when this task begins, completes or fails. The default behavior is to not send any emails. This block is documented below.
environmentKey string
identifier of an environment block that is used to specify libraries. Required for some tasks (spark_python_task, python_wheel_task, ...) running on serverless compute.
existingClusterId string
Identifier of the interactive cluster to run the job on. Note: running tasks on interactive clusters may lead to increased costs!
genAiComputeTask JobTaskForEachTaskTaskGenAiComputeTask
health JobTaskForEachTaskTaskHealth
block described below that specifies health conditions for a given task.
jobClusterKey string
Identifier of the Job cluster specified in the job_cluster block.
libraries JobTaskForEachTaskTaskLibrary[]
(Set) An optional list of libraries to be installed on the cluster that will execute the job.
maxRetries number
(Integer) An optional maximum number of times to retry an unsuccessful run. A run is considered to be unsuccessful if it completes with a FAILED or INTERNAL_ERROR lifecycle state. The value -1 means to retry indefinitely and the value 0 means to never retry. The default behavior is to never retry. A run can have the following lifecycle states: PENDING, RUNNING, TERMINATING, TERMINATED, SKIPPED or INTERNAL_ERROR.
minRetryIntervalMillis number
(Integer) An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried.
newCluster JobTaskForEachTaskTaskNewCluster
Task will run on a dedicated cluster. See databricks.Cluster documentation for specification. Some parameters, such as autotermination_minutes, is_pinned, and workload_type, aren't supported!
notebookTask JobTaskForEachTaskTaskNotebookTask
notificationSettings JobTaskForEachTaskTaskNotificationSettings
An optional block controlling the notification settings on the job level documented below.
pipelineTask JobTaskForEachTaskTaskPipelineTask
pythonWheelTask JobTaskForEachTaskTaskPythonWheelTask
retryOnTimeout boolean
(Bool) An optional policy to specify whether to retry a job when it times out. The default behavior is to not retry on timeout.
runIf string
An optional value indicating the condition that determines whether the task should be run once its dependencies have been completed. One of ALL_SUCCESS, AT_LEAST_ONE_SUCCESS, NONE_FAILED, ALL_DONE, AT_LEAST_ONE_FAILED or ALL_FAILED. When omitted, defaults to ALL_SUCCESS.
runJobTask JobTaskForEachTaskTaskRunJobTask
sparkJarTask JobTaskForEachTaskTaskSparkJarTask
sparkPythonTask JobTaskForEachTaskTaskSparkPythonTask
sparkSubmitTask JobTaskForEachTaskTaskSparkSubmitTask
sqlTask JobTaskForEachTaskTaskSqlTask
timeoutSeconds number
(Integer) An optional timeout applied to each run of this job. The default behavior is to have no timeout.
webhookNotifications JobTaskForEachTaskTaskWebhookNotifications

(List) An optional set of system destinations (for example, webhook destinations or Slack) to be notified when runs of this task begin, complete, or fail. The default behavior is to not send any notifications. This field is a block and is documented below.

If no job_cluster_key, existing_cluster_id, or new_cluster is specified in the task definition, then the task will be executed using serverless compute.

task_key This property is required. str
string specifying a unique key for a given task.

  • *_task - (Required) one of the specific task blocks described below:
clean_rooms_notebook_task JobTaskForEachTaskTaskCleanRoomsNotebookTask
condition_task JobTaskForEachTaskTaskConditionTask
dbt_task JobTaskForEachTaskTaskDbtTask
depends_ons Sequence[JobTaskForEachTaskTaskDependsOn]
block specifying dependency(-ies) for a given task.
description str
description for this task.
disable_auto_optimization bool
A flag to disable auto optimization in serverless tasks.
email_notifications JobTaskForEachTaskTaskEmailNotifications
An optional block to specify a set of email addresses notified when this task begins, completes or fails. The default behavior is to not send any emails. This block is documented below.
environment_key str
identifier of an environment block that is used to specify libraries. Required for some tasks (spark_python_task, python_wheel_task, ...) running on serverless compute.
existing_cluster_id str
Identifier of the interactive cluster to run the job on. Note: running tasks on interactive clusters may lead to increased costs!
gen_ai_compute_task JobTaskForEachTaskTaskGenAiComputeTask
health JobTaskForEachTaskTaskHealth
block described below that specifies health conditions for a given task.
job_cluster_key str
Identifier of the Job cluster specified in the job_cluster block.
libraries Sequence[JobTaskForEachTaskTaskLibrary]
(Set) An optional list of libraries to be installed on the cluster that will execute the job.
max_retries int
(Integer) An optional maximum number of times to retry an unsuccessful run. A run is considered to be unsuccessful if it completes with a FAILED or INTERNAL_ERROR lifecycle state. The value -1 means to retry indefinitely and the value 0 means to never retry. The default behavior is to never retry. A run can have the following lifecycle states: PENDING, RUNNING, TERMINATING, TERMINATED, SKIPPED, or INTERNAL_ERROR.
min_retry_interval_millis int
(Integer) An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried.
new_cluster JobTaskForEachTaskTaskNewCluster
The task will run on a dedicated cluster. See the databricks.Cluster documentation for the specification. Some parameters, such as autotermination_minutes, is_pinned, and workload_type, aren't supported.
notebook_task JobTaskForEachTaskTaskNotebookTask
notification_settings JobTaskForEachTaskTaskNotificationSettings
An optional block controlling the notification settings on the job level documented below.
pipeline_task JobTaskForEachTaskTaskPipelineTask
python_wheel_task JobTaskForEachTaskTaskPythonWheelTask
retry_on_timeout bool
(Bool) An optional policy to specify whether to retry a job when it times out. The default behavior is to not retry on timeout.
run_if str
An optional value indicating the condition that determines whether the task should be run once its dependencies have been completed. One of ALL_SUCCESS, AT_LEAST_ONE_SUCCESS, NONE_FAILED, ALL_DONE, AT_LEAST_ONE_FAILED or ALL_FAILED. When omitted, defaults to ALL_SUCCESS.
run_job_task JobTaskForEachTaskTaskRunJobTask
spark_jar_task JobTaskForEachTaskTaskSparkJarTask
spark_python_task JobTaskForEachTaskTaskSparkPythonTask
spark_submit_task JobTaskForEachTaskTaskSparkSubmitTask
sql_task JobTaskForEachTaskTaskSqlTask
timeout_seconds int
(Integer) An optional timeout applied to each run of this job. The default behavior is to have no timeout.
webhook_notifications JobTaskForEachTaskTaskWebhookNotifications

(List) An optional set of system destinations (for example, webhook destinations or Slack) to be notified when runs of this task begin, complete, or fail. The default behavior is to not send any notifications. This field is a block and is documented below.

If no job_cluster_key, existing_cluster_id, or new_cluster is specified in the task definition, then the task will be executed using serverless compute.

taskKey This property is required. String
string specifying a unique key for a given task.

  • *_task - (Required) one of the specific task blocks described below:
cleanRoomsNotebookTask Property Map
conditionTask Property Map
dbtTask Property Map
dependsOns List<Property Map>
block specifying dependency(-ies) for a given task.
description String
description for this task.
disableAutoOptimization Boolean
A flag to disable auto optimization in serverless tasks.
emailNotifications Property Map
An optional block to specify a set of email addresses notified when this task begins, completes or fails. The default behavior is to not send any emails. This block is documented below.
environmentKey String
identifier of an environment block that is used to specify libraries. Required for some tasks (spark_python_task, python_wheel_task, ...) running on serverless compute.
existingClusterId String
Identifier of the interactive cluster to run the job on. Note: running tasks on interactive clusters may lead to increased costs!
genAiComputeTask Property Map
health Property Map
block described below that specifies health conditions for a given task.
jobClusterKey String
Identifier of the Job cluster specified in the job_cluster block.
libraries List<Property Map>
(Set) An optional list of libraries to be installed on the cluster that will execute the job.
maxRetries Number
(Integer) An optional maximum number of times to retry an unsuccessful run. A run is considered to be unsuccessful if it completes with a FAILED or INTERNAL_ERROR lifecycle state. The value -1 means to retry indefinitely and the value 0 means to never retry. The default behavior is to never retry. A run can have the following lifecycle states: PENDING, RUNNING, TERMINATING, TERMINATED, SKIPPED, or INTERNAL_ERROR.
minRetryIntervalMillis Number
(Integer) An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried.
newCluster Property Map
The task will run on a dedicated cluster. See the databricks.Cluster documentation for the specification. Some parameters, such as autotermination_minutes, is_pinned, and workload_type, aren't supported.
notebookTask Property Map
notificationSettings Property Map
An optional block controlling the notification settings on the job level documented below.
pipelineTask Property Map
pythonWheelTask Property Map
retryOnTimeout Boolean
(Bool) An optional policy to specify whether to retry a job when it times out. The default behavior is to not retry on timeout.
runIf String
An optional value indicating the condition that determines whether the task should be run once its dependencies have been completed. One of ALL_SUCCESS, AT_LEAST_ONE_SUCCESS, NONE_FAILED, ALL_DONE, AT_LEAST_ONE_FAILED or ALL_FAILED. When omitted, defaults to ALL_SUCCESS.
runJobTask Property Map
sparkJarTask Property Map
sparkPythonTask Property Map
sparkSubmitTask Property Map
sqlTask Property Map
timeoutSeconds Number
(Integer) An optional timeout applied to each run of this job. The default behavior is to have no timeout.
webhookNotifications Property Map

(List) An optional set of system destinations (for example, webhook destinations or Slack) to be notified when runs of this task begin, complete, or fail. The default behavior is to not send any notifications. This field is a block and is documented below.

If no job_cluster_key, existing_cluster_id, or new_cluster is specified in the task definition, then the task will be executed using serverless compute.
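
For illustration only, here is a minimal sketch of such a serverless task nested under a for-each task. The notebook path and inputs value are hypothetical placeholders, and the forEachTask/inputs field names are assumed from this resource's schema rather than taken from the example above.

import * as databricks from "@pulumi/databricks";

// Hypothetical sketch: the nested task specifies no job_cluster_key,
// existing_cluster_id, or new_cluster, so it is expected to run on serverless compute.
const serverlessLoop = new databricks.Job("serverless-loop", {
    name: "Serverless for-each example",
    tasks: [{
        taskKey: "loop",
        forEachTask: {
            // assumed to be a JSON-encoded list of inputs, one nested-task run per element
            inputs: JSON.stringify(["2024-01-01", "2024-01-02"]),
            task: {
                taskKey: "process_date",
                notebookTask: {
                    notebookPath: "/Shared/process_date", // placeholder path
                },
            },
        },
    }],
});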

JobTaskForEachTaskTaskCleanRoomsNotebookTask
, JobTaskForEachTaskTaskCleanRoomsNotebookTaskArgs

CleanRoomName This property is required. string
The clean room that the notebook belongs to.
NotebookName This property is required. string
Name of the notebook being run.
Etag string
Checksum to validate the freshness of the notebook resource.
NotebookBaseParameters Dictionary<string, string>
Base parameters to be used for the clean room notebook job.
CleanRoomName This property is required. string
The clean room that the notebook belongs to.
NotebookName This property is required. string
Name of the notebook being run.
Etag string
Checksum to validate the freshness of the notebook resource.
NotebookBaseParameters map[string]string
Base parameters to be used for the clean room notebook job.
cleanRoomName This property is required. String
The clean room that the notebook belongs to.
notebookName This property is required. String
Name of the notebook being run.
etag String
Checksum to validate the freshness of the notebook resource.
notebookBaseParameters Map<String,String>
Base parameters to be used for the clean room notebook job.
cleanRoomName This property is required. string
The clean room that the notebook belongs to.
notebookName This property is required. string
Name of the notebook being run.
etag string
Checksum to validate the freshness of the notebook resource.
notebookBaseParameters {[key: string]: string}
Base parameters to be used for the clean room notebook job.
clean_room_name This property is required. str
The clean room that the notebook belongs to.
notebook_name This property is required. str
Name of the notebook being run.
etag str
Checksum to validate the freshness of the notebook resource.
notebook_base_parameters Mapping[str, str]
Base parameters to be used for the clean room notebook job.
cleanRoomName This property is required. String
The clean room that the notebook belongs to.
notebookName This property is required. String
Name of the notebook being run.
etag String
Checksum to validate the freshness of the notebook resource.
notebookBaseParameters Map<String>
Base parameters to be used for the clean room notebook job.
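
As a hedged sketch of how these fields can be wired together (shown on a top-level task, where the same block shape applies), with placeholder clean room, notebook, and parameter values:

import * as databricks from "@pulumi/databricks";

// Hypothetical values: the clean room and notebook names are placeholders.
const cleanRoomJob = new databricks.Job("clean-room-job", {
    name: "Clean room notebook example",
    tasks: [{
        taskKey: "clean_room_run",
        cleanRoomsNotebookTask: {
            cleanRoomName: "analytics-clean-room",
            notebookName: "shared_metrics",
            notebookBaseParameters: {
                report_date: "2024-01-01",
            },
        },
    }],
});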

JobTaskForEachTaskTaskConditionTask
, JobTaskForEachTaskTaskConditionTaskArgs

Left This property is required. string
The left operand of the condition task. It could be a string value, job state, or a parameter reference.
Op This property is required. string

The string specifying the operation used to compare operands. Currently, the following operators are supported: EQUAL_TO, GREATER_THAN, GREATER_THAN_OR_EQUAL, LESS_THAN, LESS_THAN_OR_EQUAL, NOT_EQUAL. (Check the API docs for the latest information).

This task does not require a cluster to execute and does not support retries or notifications.

Right This property is required. string
The right operand of the condition task. It could be a string value, job state, or parameter reference.
Left This property is required. string
The left operand of the condition task. It could be a string value, job state, or a parameter reference.
Op This property is required. string

The string specifying the operation used to compare operands. Currently, the following operators are supported: EQUAL_TO, GREATER_THAN, GREATER_THAN_OR_EQUAL, LESS_THAN, LESS_THAN_OR_EQUAL, NOT_EQUAL. (Check the API docs for the latest information).

This task does not require a cluster to execute and does not support retries or notifications.

Right This property is required. string
The right operand of the condition task. It could be a string value, job state, or parameter reference.
left This property is required. String
The left operand of the condition task. It could be a string value, job state, or a parameter reference.
op This property is required. String

The string specifying the operation used to compare operands. Currently, the following operators are supported: EQUAL_TO, GREATER_THAN, GREATER_THAN_OR_EQUAL, LESS_THAN, LESS_THAN_OR_EQUAL, NOT_EQUAL. (Check the API docs for the latest information).

This task does not require a cluster to execute and does not support retries or notifications.

right This property is required. String
The right operand of the condition task. It could be a string value, job state, or parameter reference.
left This property is required. string
The left operand of the condition task. It could be a string value, job state, or a parameter reference.
op This property is required. string

The string specifying the operation used to compare operands. Currently, the following operators are supported: EQUAL_TO, GREATER_THAN, GREATER_THAN_OR_EQUAL, LESS_THAN, LESS_THAN_OR_EQUAL, NOT_EQUAL. (Check the API docs for the latest information).

This task does not require a cluster to execute and does not support retries or notifications.

right This property is required. string
The right operand of the condition task. It could be a string value, job state, or parameter reference.
left This property is required. str
The left operand of the condition task. It could be a string value, job state, or a parameter reference.
op This property is required. str

The string specifying the operation used to compare operands. Currently, the following operators are supported: EQUAL_TO, GREATER_THAN, GREATER_THAN_OR_EQUAL, LESS_THAN, LESS_THAN_OR_EQUAL, NOT_EQUAL. (Check the API docs for the latest information).

This task does not require a cluster to execute and does not support retries or notifications.

right This property is required. str
The right operand of the condition task. It could be a string value, job state, or parameter reference.
left This property is required. String
The left operand of the condition task. It could be a string value, job state, or a parameter reference.
op This property is required. String

The string specifying the operation used to compare operands. Currently, the following operators are supported: EQUAL_TO, GREATER_THAN, GREATER_THAN_OR_EQUAL, LESS_THAN, LESS_THAN_OR_EQUAL, NOT_EQUAL. (Check the API docs for the latest information).

This task does not require a cluster to execute and does not support retries or notifications.

right This property is required. String
The right operand of the condition task. It could be a string value, job state, or parameter reference.
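
For illustration, a minimal sketch of a condition task comparing a job parameter reference against a literal. The parameter name and reference syntax are assumptions (Databricks dynamic value references of the form {{job.parameters.<name>}}); adjust them to your own job configuration.

import * as databricks from "@pulumi/databricks";

// Hypothetical sketch: runs downstream tasks only when the (assumed) job parameter
// "environment" equals "production". Condition tasks need no cluster.
const gatedJob = new databricks.Job("gated-job", {
    name: "Condition task example",
    tasks: [{
        taskKey: "check_environment",
        conditionTask: {
            left: "{{job.parameters.environment}}", // assumed dynamic value reference
            op: "EQUAL_TO",
            right: "production",
        },
    }],
});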

JobTaskForEachTaskTaskDbtTask
, JobTaskForEachTaskTaskDbtTaskArgs

Commands This property is required. List<string>
(Array) Series of dbt commands to execute in sequence. Every command must start with "dbt".
Catalog string
The name of the catalog to use inside Unity Catalog.
ProfilesDirectory string
The relative path to the directory in the repository specified by git_source where dbt should look for the profiles.yml file. If not specified, defaults to the repository's root directory. Equivalent to passing --profiles-dir to a dbt command.
ProjectDirectory string
The path where dbt should look for dbt_project.yml. Equivalent to passing --project-dir to the dbt CLI.

  • If source is GIT: Relative path to the directory in the repository specified in the git_source block. Defaults to the repository's root directory when not specified.
  • If source is WORKSPACE: Absolute path to the folder in the workspace.
Schema string
The name of the schema dbt should run in. Defaults to default.
Source string
The source of the project. Possible values are WORKSPACE and GIT. Defaults to GIT if a git_source block is present in the job definition.
WarehouseId string

The ID of the SQL warehouse that dbt should execute against.

You also need to include a git_source block to configure the repository that contains the dbt project.

Commands This property is required. []string
(Array) Series of dbt commands to execute in sequence. Every command must start with "dbt".
Catalog string
The name of the catalog to use inside Unity Catalog.
ProfilesDirectory string
The relative path to the directory in the repository specified by git_source where dbt should look for the profiles.yml file. If not specified, defaults to the repository's root directory. Equivalent to passing --profiles-dir to a dbt command.
ProjectDirectory string
The path where dbt should look for dbt_project.yml. Equivalent to passing --project-dir to the dbt CLI.

  • If source is GIT: Relative path to the directory in the repository specified in the git_source block. Defaults to the repository's root directory when not specified.
  • If source is WORKSPACE: Absolute path to the folder in the workspace.
Schema string
The name of the schema dbt should run in. Defaults to default.
Source string
The source of the project. Possible values are WORKSPACE and GIT. Defaults to GIT if a git_source block is present in the job definition.
WarehouseId string

The ID of the SQL warehouse that dbt should execute against.

You also need to include a git_source block to configure the repository that contains the dbt project.

commands This property is required. List<String>
(Array) Series of dbt commands to execute in sequence. Every command must start with "dbt".
catalog String
The name of the catalog to use inside Unity Catalog.
profilesDirectory String
The relative path to the directory in the repository specified by git_source where dbt should look for the profiles.yml file. If not specified, defaults to the repository's root directory. Equivalent to passing --profiles-dir to a dbt command.
projectDirectory String
The path where dbt should look for dbt_project.yml. Equivalent to passing --project-dir to the dbt CLI.

  • If source is GIT: Relative path to the directory in the repository specified in the git_source block. Defaults to the repository's root directory when not specified.
  • If source is WORKSPACE: Absolute path to the folder in the workspace.
schema String
The name of the schema dbt should run in. Defaults to default.
source String
The source of the project. Possible values are WORKSPACE and GIT. Defaults to GIT if a git_source block is present in the job definition.
warehouseId String

The ID of the SQL warehouse that dbt should execute against.

You also need to include a git_source block to configure the repository that contains the dbt project.

commands This property is required. string[]
(Array) Series of dbt commands to execute in sequence. Every command must start with "dbt".
catalog string
The name of the catalog to use inside Unity Catalog.
profilesDirectory string
The relative path to the directory in the repository specified by git_source where dbt should look for the profiles.yml file. If not specified, defaults to the repository's root directory. Equivalent to passing --profiles-dir to a dbt command.
projectDirectory string
The path where dbt should look for dbt_project.yml. Equivalent to passing --project-dir to the dbt CLI.

  • If source is GIT: Relative path to the directory in the repository specified in the git_source block. Defaults to the repository's root directory when not specified.
  • If source is WORKSPACE: Absolute path to the folder in the workspace.
schema string
The name of the schema dbt should run in. Defaults to default.
source string
The source of the project. Possible values are WORKSPACE and GIT. Defaults to GIT if a git_source block is present in the job definition.
warehouseId string

The ID of the SQL warehouse that dbt should execute against.

You also need to include a git_source block to configure the repository that contains the dbt project.

commands This property is required. Sequence[str]
(Array) Series of dbt commands to execute in sequence. Every command must start with "dbt".
catalog str
The name of the catalog to use inside Unity Catalog.
profiles_directory str
The relative path to the directory in the repository specified by git_source where dbt should look for the profiles.yml file. If not specified, defaults to the repository's root directory. Equivalent to passing --profiles-dir to a dbt command.
project_directory str
The path where dbt should look for dbt_project.yml. Equivalent to passing --project-dir to the dbt CLI.

  • If source is GIT: Relative path to the directory in the repository specified in the git_source block. Defaults to the repository's root directory when not specified.
  • If source is WORKSPACE: Absolute path to the folder in the workspace.
schema str
The name of the schema dbt should run in. Defaults to default.
source str
The source of the project. Possible values are WORKSPACE and GIT. Defaults to GIT if a git_source block is present in the job definition.
warehouse_id str

The ID of the SQL warehouse that dbt should execute against.

You also need to include a git_source block to configure the repository that contains the dbt project.

commands This property is required. List<String>
(Array) Series of dbt commands to execute in sequence. Every command must start with "dbt".
catalog String
The name of the catalog to use inside Unity Catalog.
profilesDirectory String
The relative path to the directory in the repository specified by git_source where dbt should look for the profiles.yml file. If not specified, defaults to the repository's root directory. Equivalent to passing --profiles-dir to a dbt command.
projectDirectory String
The path where dbt should look for dbt_project.yml. Equivalent to passing --project-dir to the dbt CLI.

  • If source is GIT: Relative path to the directory in the repository specified in the git_source block. Defaults to the repository's root directory when not specified.
  • If source is WORKSPACE: Absolute path to the folder in the workspace.
schema String
The name of the schema dbt should run in. Defaults to default.
source String
The source of the project. Possible values are WORKSPACE and GIT. Defaults to GIT if a git_source block is present in the job definition.
warehouseId String

The ID of the SQL warehouse that dbt should execute against.

You also need to include a git_source block to configure the repository that contains the dbt project.
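
As an illustrative sketch, a dbt task running a workspace project against a SQL warehouse. The project path, schema, and warehouse ID are placeholders; depending on your compute, the task may also need libraries or an environment providing dbt, which is omitted here.

import * as databricks from "@pulumi/databricks";

// Hypothetical sketch of a dbt task; all identifiers below are placeholders.
const dbtJob = new databricks.Job("dbt-job", {
    name: "dbt task example",
    tasks: [{
        taskKey: "dbt_run",
        dbtTask: {
            commands: [
                "dbt deps",
                "dbt seed",
                "dbt run",
            ],
            source: "WORKSPACE",
            projectDirectory: "/Workspace/Repos/me/analytics", // absolute path when source is WORKSPACE
            schema: "analytics",
            warehouseId: "1234567890abcdef", // placeholder SQL warehouse ID
        },
    }],
});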

JobTaskForEachTaskTaskDependsOn
, JobTaskForEachTaskTaskDependsOnArgs

TaskKey This property is required. string
The name of the task this task depends on.
Outcome string

Can only be specified on condition task dependencies. The outcome of the dependent task that must be met for this task to run. Possible values are "true" or "false".

Similar to the tasks themselves, each dependency inside the task needs to be declared in alphabetical order with respect to task_key in order to get consistent Pulumi diffs.

TaskKey This property is required. string
The name of the task this task depends on.
Outcome string

Can only be specified on condition task dependencies. The outcome of the dependent task that must be met for this task to run. Possible values are "true" or "false".

Similar to the tasks themselves, each dependency inside the task needs to be declared in alphabetical order with respect to task_key in order to get consistent Pulumi diffs.

taskKey This property is required. String
The name of the task this task depends on.
outcome String

Can only be specified on condition task dependencies. The outcome of the dependent task that must be met for this task to run. Possible values are "true" or "false".

Similar to the tasks themselves, each dependency inside the task needs to be declared in alphabetical order with respect to task_key in order to get consistent Pulumi diffs.

taskKey This property is required. string
The name of the task this task depends on.
outcome string

Can only be specified on condition task dependencies. The outcome of the dependent task that must be met for this task to run. Possible values are "true" or "false".

Similar to the tasks themselves, each dependency inside the task needs to be declared in alphabetical order with respect to task_key in order to get consistent Pulumi diffs.

task_key This property is required. str
The name of the task this task depends on.
outcome str

Can only be specified on condition task dependencies. The outcome of the dependent task that must be met for this task to run. Possible values are "true" or "false".

Similar to the tasks themselves, each dependency inside the task needs to be declared in alphabetical order with respect to task_key in order to get consistent Pulumi diffs.

taskKey This property is required. String
The name of the task this task depends on.
outcome String

Can only be specified on condition task dependencies. The outcome of the dependent task that must be met for this task to run. Possible values are "true" or "false".

Similar to the tasks themselves, each dependency inside the task needs to be declared in alphabetical order with respect to task_key in order to get consistent Pulumi diffs.
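
For illustration, a minimal sketch where a task only runs if a preceding condition task evaluates to "true". The notebook path is a placeholder.

import * as databricks from "@pulumi/databricks";

// Hypothetical sketch: task "b" depends on condition task "a" with outcome "true".
const dependentJob = new databricks.Job("dependent-job", {
    name: "depends_on with outcome example",
    tasks: [
        {
            taskKey: "a",
            conditionTask: {
                left: "1",
                op: "EQUAL_TO",
                right: "1",
            },
        },
        {
            taskKey: "b",
            dependsOns: [{
                taskKey: "a",
                outcome: "true", // only valid on dependencies pointing at a condition task
            }],
            notebookTask: {
                notebookPath: "/Shared/placeholder", // placeholder path
            },
        },
    ],
});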

JobTaskForEachTaskTaskEmailNotifications
, JobTaskForEachTaskTaskEmailNotificationsArgs

NoAlertForSkippedRuns bool
(Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the notification_settings configuration block).
OnDurationWarningThresholdExceededs List<string>

(List) list of emails to notify when the duration of a run exceeds the threshold specified by the RUN_DURATION_SECONDS metric in the health block.

The following parameter is only available for the job level configuration.

OnFailures List<string>
(List) list of emails to notify when the run fails.
OnStarts List<string>
(List) list of emails to notify when the run starts.
OnStreamingBacklogExceededs List<string>
OnSuccesses List<string>
(List) list of emails to notify when the run completes successfully.
NoAlertForSkippedRuns bool
(Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the notification_settings configuration block).
OnDurationWarningThresholdExceededs []string

(List) list of emails to notify when the duration of a run exceeds the threshold specified by the RUN_DURATION_SECONDS metric in the health block.

The following parameter is only available for the job level configuration.

OnFailures []string
(List) list of emails to notify when the run fails.
OnStarts []string
(List) list of emails to notify when the run starts.
OnStreamingBacklogExceededs []string
OnSuccesses []string
(List) list of emails to notify when the run completes successfully.
noAlertForSkippedRuns Boolean
(Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the notification_settings configuration block).
onDurationWarningThresholdExceededs List<String>

(List) list of emails to notify when the duration of a run exceeds the threshold specified by the RUN_DURATION_SECONDS metric in the health block.

The following parameter is only available for the job level configuration.

onFailures List<String>
(List) list of emails to notify when the run fails.
onStarts List<String>
(List) list of emails to notify when the run starts.
onStreamingBacklogExceededs List<String>
onSuccesses List<String>
(List) list of emails to notify when the run completes successfully.
noAlertForSkippedRuns boolean
(Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the notification_settings configuration block).
onDurationWarningThresholdExceededs string[]

(List) list of emails to notify when the duration of a run exceeds the threshold specified by the RUN_DURATION_SECONDS metric in the health block.

The following parameter is only available for the job level configuration.

onFailures string[]
(List) list of emails to notify when the run fails.
onStarts string[]
(List) list of emails to notify when the run starts.
onStreamingBacklogExceededs string[]
onSuccesses string[]
(List) list of emails to notify when the run completes successfully.
no_alert_for_skipped_runs bool
(Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the notification_settings configuration block).
on_duration_warning_threshold_exceededs Sequence[str]

(List) list of emails to notify when the duration of a run exceeds the threshold specified by the RUN_DURATION_SECONDS metric in the health block.

The following parameter is only available for the job level configuration.

on_failures Sequence[str]
(List) list of emails to notify when the run fails.
on_starts Sequence[str]
(List) list of emails to notify when the run starts.
on_streaming_backlog_exceededs Sequence[str]
on_successes Sequence[str]
(List) list of emails to notify when the run completes successfully.
noAlertForSkippedRuns Boolean
(Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the notification_settings configuration block).
onDurationWarningThresholdExceededs List<String>

(List) list of emails to notify when the duration of a run exceeds the threshold specified by the RUN_DURATION_SECONDS metric in the health block.

The following parameter is only available for the job level configuration.

onFailures List<String>
(List) list of emails to notify when the run fails.
onStarts List<String>
(List) list of emails to notify when the run starts.
onStreamingBacklogExceededs List<String>
onSuccesses List<String>
(List) list of emails to notify when the run completes successfully.
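
As a minimal sketch of task-level email notifications (addresses and the notebook path are placeholders):

import * as databricks from "@pulumi/databricks";

// Hypothetical sketch: notify on failure and success, skip alerts for skipped runs.
const notifiedJob = new databricks.Job("notified-job", {
    name: "Email notifications example",
    tasks: [{
        taskKey: "main",
        notebookTask: {
            notebookPath: "/Shared/placeholder", // placeholder path
        },
        emailNotifications: {
            onFailures: ["oncall@example.com"],
            onSuccesses: ["team@example.com"],
            noAlertForSkippedRuns: true,
        },
    }],
});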

JobTaskForEachTaskTaskGenAiComputeTask
, JobTaskForEachTaskTaskGenAiComputeTaskArgs

JobTaskForEachTaskTaskGenAiComputeTaskCompute
, JobTaskForEachTaskTaskGenAiComputeTaskComputeArgs

GpuNodePoolId This property is required. string
NumGpus This property is required. int
GpuType string
GpuNodePoolId This property is required. string
NumGpus This property is required. int
GpuType string
gpuNodePoolId This property is required. String
numGpus This property is required. Integer
gpuType String
gpuNodePoolId This property is required. string
numGpus This property is required. number
gpuType string
gpu_node_pool_id This property is required. str
num_gpus This property is required. int
gpu_type str
gpuNodePoolId This property is required. String
numGpus This property is required. Number
gpuType String

JobTaskForEachTaskTaskHealth
, JobTaskForEachTaskTaskHealthArgs

Rules This property is required. List<JobTaskForEachTaskTaskHealthRule>
list of rules that are represented as objects with the following attributes:
Rules This property is required. []JobTaskForEachTaskTaskHealthRule
list of rules that are represented as objects with the following attributes:
rules This property is required. List<JobTaskForEachTaskTaskHealthRule>
list of rules that are represented as objects with the following attributes:
rules This property is required. JobTaskForEachTaskTaskHealthRule[]
list of rules that are represented as objects with the following attributes:
rules This property is required. Sequence[JobTaskForEachTaskTaskHealthRule]
list of rules that are represented as objects with the following attributes:
rules This property is required. List<Property Map>
list of rules that are represented as objects with the following attributes:

JobTaskForEachTaskTaskHealthRule
, JobTaskForEachTaskTaskHealthRuleArgs

Metric This property is required. string
string specifying the metric to check. The only supported metric is RUN_DURATION_SECONDS (check Jobs REST API documentation for the latest information).
Op This property is required. string
string specifying the operation used to evaluate the given metric. The only supported operation is GREATER_THAN.
Value This property is required. int
integer value used to compare to the given metric.
Metric This property is required. string
string specifying the metric to check. The only supported metric is RUN_DURATION_SECONDS (check Jobs REST API documentation for the latest information).
Op This property is required. string
string specifying the operation used to evaluate the given metric. The only supported operation is GREATER_THAN.
Value This property is required. int
integer value used to compare to the given metric.
metric This property is required. String
string specifying the metric to check. The only supported metric is RUN_DURATION_SECONDS (check Jobs REST API documentation for the latest information).
op This property is required. String
string specifying the operation used to evaluate the given metric. The only supported operation is GREATER_THAN.
value This property is required. Integer
integer value used to compare to the given metric.
metric This property is required. string
string specifying the metric to check. The only supported metric is RUN_DURATION_SECONDS (check Jobs REST API documentation for the latest information).
op This property is required. string
string specifying the operation used to evaluate the given metric. The only supported operation is GREATER_THAN.
value This property is required. number
integer value used to compare to the given metric.
metric This property is required. str
string specifying the metric to check. The only supported metric is RUN_DURATION_SECONDS (check Jobs REST API documentation for the latest information).
op This property is required. str
string specifying the operation used to evaluate the given metric. The only supported operation is GREATER_THAN.
value This property is required. int
integer value used to compare to the given metric.
metric This property is required. String
string specifying the metric to check. The only supported metric is RUN_DURATION_SECONDS (check Jobs REST API documentation for the latest information).
op This property is required. String
string specifying the operation used to evaluate the given metric. The only supported operation is GREATER_THAN.
value This property is required. Number
integer value used to compare to the given metric.
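
For illustration, a sketch of a health rule that flags runs exceeding one hour, paired with a duration-warning email notification. Addresses and the notebook path are placeholders.

import * as databricks from "@pulumi/databricks";

// Hypothetical sketch: RUN_DURATION_SECONDS > 3600 triggers the duration warning.
const monitoredJob = new databricks.Job("monitored-job", {
    name: "Health rule example",
    tasks: [{
        taskKey: "main",
        notebookTask: {
            notebookPath: "/Shared/placeholder", // placeholder path
        },
        health: {
            rules: [{
                metric: "RUN_DURATION_SECONDS",
                op: "GREATER_THAN",
                value: 3600, // seconds
            }],
        },
        emailNotifications: {
            onDurationWarningThresholdExceededs: ["oncall@example.com"], // placeholder address
        },
    }],
});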

JobTaskForEachTaskTaskLibrary
, JobTaskForEachTaskTaskLibraryArgs

JobTaskForEachTaskTaskLibraryCran
, JobTaskForEachTaskTaskLibraryCranArgs

Package This property is required. string
Repo string
Package This property is required. string
Repo string
package_ This property is required. String
repo String
package This property is required. string
repo string
package This property is required. str
repo str
package This property is required. String
repo String

JobTaskForEachTaskTaskLibraryMaven
, JobTaskForEachTaskTaskLibraryMavenArgs

Coordinates This property is required. string
Exclusions List<string>
Repo string
Coordinates This property is required. string
Exclusions []string
Repo string
coordinates This property is required. String
exclusions List<String>
repo String
coordinates This property is required. string
exclusions string[]
repo string
coordinates This property is required. str
exclusions Sequence[str]
repo str
coordinates This property is required. String
exclusions List<String>
repo String

JobTaskForEachTaskTaskLibraryPypi
, JobTaskForEachTaskTaskLibraryPypiArgs

Package This property is required. string
Repo string
Package This property is required. string
Repo string
package_ This property is required. String
repo String
package This property is required. string
repo string
package This property is required. str
repo str
package This property is required. String
repo String
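
As a hedged sketch of attaching libraries to a task (the cluster ID, package versions, and Maven coordinates are placeholders):

import * as databricks from "@pulumi/databricks";

// Hypothetical sketch: install a PyPI package and a Maven artifact for this task.
const libraryJob = new databricks.Job("library-job", {
    name: "Task libraries example",
    tasks: [{
        taskKey: "main",
        existingClusterId: "0123-456789-abcdefgh", // placeholder cluster ID
        notebookTask: {
            notebookPath: "/Shared/placeholder", // placeholder path
        },
        libraries: [
            { pypi: { package: "requests==2.31.0" } },
            { maven: { coordinates: "com.fasterxml.jackson.core:jackson-databind:2.15.2" } },
        ],
    }],
});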

JobTaskForEachTaskTaskNewCluster
, JobTaskForEachTaskTaskNewClusterArgs

SparkVersion This property is required. string
ApplyPolicyDefaultValues bool
Autoscale JobTaskForEachTaskTaskNewClusterAutoscale
AwsAttributes JobTaskForEachTaskTaskNewClusterAwsAttributes
AzureAttributes JobTaskForEachTaskTaskNewClusterAzureAttributes
ClusterId string
ClusterLogConf JobTaskForEachTaskTaskNewClusterClusterLogConf
ClusterMountInfos List<JobTaskForEachTaskTaskNewClusterClusterMountInfo>
ClusterName string
CustomTags Dictionary<string, string>
DataSecurityMode string
DockerImage JobTaskForEachTaskTaskNewClusterDockerImage
DriverInstancePoolId string
DriverNodeTypeId string
EnableElasticDisk bool
EnableLocalDiskEncryption bool
GcpAttributes JobTaskForEachTaskTaskNewClusterGcpAttributes
IdempotencyToken Changes to this property will trigger replacement. string
InitScripts List<JobTaskForEachTaskTaskNewClusterInitScript>
InstancePoolId string
IsSingleNode bool
Kind string
Libraries List<JobTaskForEachTaskTaskNewClusterLibrary>
(List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
NodeTypeId string
NumWorkers int
PolicyId string
RuntimeEngine string
SingleUserName string
SparkConf Dictionary<string, string>
SparkEnvVars Dictionary<string, string>
SshPublicKeys List<string>
UseMlRuntime bool
WorkloadType JobTaskForEachTaskTaskNewClusterWorkloadType
This parameter isn't supported.
SparkVersion This property is required. string
ApplyPolicyDefaultValues bool
Autoscale JobTaskForEachTaskTaskNewClusterAutoscale
AwsAttributes JobTaskForEachTaskTaskNewClusterAwsAttributes
AzureAttributes JobTaskForEachTaskTaskNewClusterAzureAttributes
ClusterId string
ClusterLogConf JobTaskForEachTaskTaskNewClusterClusterLogConf
ClusterMountInfos []JobTaskForEachTaskTaskNewClusterClusterMountInfo
ClusterName string
CustomTags map[string]string
DataSecurityMode string
DockerImage JobTaskForEachTaskTaskNewClusterDockerImage
DriverInstancePoolId string
DriverNodeTypeId string
EnableElasticDisk bool
EnableLocalDiskEncryption bool
GcpAttributes JobTaskForEachTaskTaskNewClusterGcpAttributes
IdempotencyToken Changes to this property will trigger replacement. string
InitScripts []JobTaskForEachTaskTaskNewClusterInitScript
InstancePoolId string
IsSingleNode bool
Kind string
Libraries []JobTaskForEachTaskTaskNewClusterLibrary
(List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
NodeTypeId string
NumWorkers int
PolicyId string
RuntimeEngine string
SingleUserName string
SparkConf map[string]string
SparkEnvVars map[string]string
SshPublicKeys []string
UseMlRuntime bool
WorkloadType JobTaskForEachTaskTaskNewClusterWorkloadType
This parameter isn't supported.
sparkVersion This property is required. String
applyPolicyDefaultValues Boolean
autoscale JobTaskForEachTaskTaskNewClusterAutoscale
awsAttributes JobTaskForEachTaskTaskNewClusterAwsAttributes
azureAttributes JobTaskForEachTaskTaskNewClusterAzureAttributes
clusterId String
clusterLogConf JobTaskForEachTaskTaskNewClusterClusterLogConf
clusterMountInfos List<JobTaskForEachTaskTaskNewClusterClusterMountInfo>
clusterName String
customTags Map<String,String>
dataSecurityMode String
dockerImage JobTaskForEachTaskTaskNewClusterDockerImage
driverInstancePoolId String
driverNodeTypeId String
enableElasticDisk Boolean
enableLocalDiskEncryption Boolean
gcpAttributes JobTaskForEachTaskTaskNewClusterGcpAttributes
idempotencyToken Changes to this property will trigger replacement. String
initScripts List<JobTaskForEachTaskTaskNewClusterInitScript>
instancePoolId String
isSingleNode Boolean
kind String
libraries List<JobTaskForEachTaskTaskNewClusterLibrary>
(List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
nodeTypeId String
numWorkers Integer
policyId String
runtimeEngine String
singleUserName String
sparkConf Map<String,String>
sparkEnvVars Map<String,String>
sshPublicKeys List<String>
useMlRuntime Boolean
workloadType JobTaskForEachTaskTaskNewClusterWorkloadType
This parameter isn't supported.
spark_version This property is required. str
apply_policy_default_values bool
autoscale JobTaskForEachTaskTaskNewClusterAutoscale
aws_attributes JobTaskForEachTaskTaskNewClusterAwsAttributes
azure_attributes JobTaskForEachTaskTaskNewClusterAzureAttributes
cluster_id str
cluster_log_conf JobTaskForEachTaskTaskNewClusterClusterLogConf
cluster_mount_infos Sequence[JobTaskForEachTaskTaskNewClusterClusterMountInfo]
cluster_name str
custom_tags Mapping[str, str]
data_security_mode str
docker_image JobTaskForEachTaskTaskNewClusterDockerImage
driver_instance_pool_id str
driver_node_type_id str
enable_elastic_disk bool
enable_local_disk_encryption bool
gcp_attributes JobTaskForEachTaskTaskNewClusterGcpAttributes
idempotency_token Changes to this property will trigger replacement. str
init_scripts Sequence[JobTaskForEachTaskTaskNewClusterInitScript]
instance_pool_id str
is_single_node bool
kind str
libraries Sequence[JobTaskForEachTaskTaskNewClusterLibrary]
(List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
node_type_id str
num_workers int
policy_id str
runtime_engine str
single_user_name str
spark_conf Mapping[str, str]
spark_env_vars Mapping[str, str]
ssh_public_keys Sequence[str]
use_ml_runtime bool
workload_type JobTaskForEachTaskTaskNewClusterWorkloadType
This parameter isn't supported.
sparkVersion This property is required. String
applyPolicyDefaultValues Boolean
autoscale Property Map
awsAttributes Property Map
azureAttributes Property Map
clusterId String
clusterLogConf Property Map
clusterMountInfos List<Property Map>
clusterName String
customTags Map<String>
dataSecurityMode String
dockerImage Property Map
driverInstancePoolId String
driverNodeTypeId String
enableElasticDisk Boolean
enableLocalDiskEncryption Boolean
gcpAttributes Property Map
idempotencyToken Changes to this property will trigger replacement. String
initScripts List<Property Map>
instancePoolId String
isSingleNode Boolean
kind String
libraries List<Property Map>
(List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
nodeTypeId String
numWorkers Number
policyId String
runtimeEngine String
singleUserName String
sparkConf Map<String>
sparkEnvVars Map<String>
sshPublicKeys List<String>
useMlRuntime Boolean
workloadType Property Map
This parameter isn't supported.
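
For illustration, a minimal sketch of a per-task job cluster. The Spark version and node type are placeholders; in practice they are usually looked up with data sources such as databricks.getSparkVersion and databricks.getNodeType, as in the main example for this resource.

import * as databricks from "@pulumi/databricks";

// Hypothetical sketch: dedicated autoscaling cluster for a single task.
const clusterJob = new databricks.Job("cluster-job", {
    name: "new_cluster example",
    tasks: [{
        taskKey: "main",
        newCluster: {
            sparkVersion: "15.4.x-scala2.12", // placeholder
            nodeTypeId: "i3.xlarge",          // placeholder
            autoscale: {
                minWorkers: 1,
                maxWorkers: 4,
            },
            customTags: {
                team: "data-eng", // placeholder tag
            },
        },
        notebookTask: {
            notebookPath: "/Shared/placeholder", // placeholder path
        },
    }],
});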

JobTaskForEachTaskTaskNewClusterAutoscale
, JobTaskForEachTaskTaskNewClusterAutoscaleArgs

maxWorkers Integer
minWorkers Integer
maxWorkers number
minWorkers number
maxWorkers Number
minWorkers Number

JobTaskForEachTaskTaskNewClusterAwsAttributes
, JobTaskForEachTaskTaskNewClusterAwsAttributesArgs

JobTaskForEachTaskTaskNewClusterAzureAttributes
, JobTaskForEachTaskTaskNewClusterAzureAttributesArgs

JobTaskForEachTaskTaskNewClusterAzureAttributesLogAnalyticsInfo
, JobTaskForEachTaskTaskNewClusterAzureAttributesLogAnalyticsInfoArgs

JobTaskForEachTaskTaskNewClusterClusterLogConf
, JobTaskForEachTaskTaskNewClusterClusterLogConfArgs

JobTaskForEachTaskTaskNewClusterClusterLogConfDbfs
, JobTaskForEachTaskTaskNewClusterClusterLogConfDbfsArgs

Destination This property is required. string
Destination This property is required. string
destination This property is required. String
destination This property is required. string
destination This property is required. str
destination This property is required. String

JobTaskForEachTaskTaskNewClusterClusterLogConfS3
, JobTaskForEachTaskTaskNewClusterClusterLogConfS3Args

Destination This property is required. string
CannedAcl string
EnableEncryption bool
EncryptionType string
Endpoint string
KmsKey string
Region string
Destination This property is required. string
CannedAcl string
EnableEncryption bool
EncryptionType string
Endpoint string
KmsKey string
Region string
destination This property is required. String
cannedAcl String
enableEncryption Boolean
encryptionType String
endpoint String
kmsKey String
region String
destination This property is required. string
cannedAcl string
enableEncryption boolean
encryptionType string
endpoint string
kmsKey string
region string
destination This property is required. str
canned_acl str
enable_encryption bool
encryption_type str
endpoint str
kms_key str
region str
destination This property is required. String
cannedAcl String
enableEncryption Boolean
encryptionType String
endpoint String
kmsKey String
region String
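
As a hedged sketch of shipping cluster logs to S3 from a per-task cluster (bucket, region, and cluster settings are placeholders):

import * as databricks from "@pulumi/databricks";

// Hypothetical sketch: write driver and executor logs to an S3 prefix.
const loggedJob = new databricks.Job("logged-job", {
    name: "cluster_log_conf example",
    tasks: [{
        taskKey: "main",
        newCluster: {
            sparkVersion: "15.4.x-scala2.12", // placeholder
            nodeTypeId: "i3.xlarge",          // placeholder
            numWorkers: 1,
            clusterLogConf: {
                s3: {
                    destination: "s3://my-log-bucket/job-logs", // placeholder bucket
                    region: "us-east-1",
                    cannedAcl: "bucket-owner-full-control",
                },
            },
        },
        notebookTask: {
            notebookPath: "/Shared/placeholder", // placeholder path
        },
    }],
});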

JobTaskForEachTaskTaskNewClusterClusterLogConfVolumes
, JobTaskForEachTaskTaskNewClusterClusterLogConfVolumesArgs

Destination This property is required. string
Destination This property is required. string
destination This property is required. String
destination This property is required. string
destination This property is required. str
destination This property is required. String

JobTaskForEachTaskTaskNewClusterClusterMountInfo
, JobTaskForEachTaskTaskNewClusterClusterMountInfoArgs

localMountDirPath This property is required. String
networkFilesystemInfo This property is required. Property Map
remoteMountDirPath String

JobTaskForEachTaskTaskNewClusterClusterMountInfoNetworkFilesystemInfo
, JobTaskForEachTaskTaskNewClusterClusterMountInfoNetworkFilesystemInfoArgs

ServerAddress This property is required. string
MountOptions string
ServerAddress This property is required. string
MountOptions string
serverAddress This property is required. String
mountOptions String
serverAddress This property is required. string
mountOptions string
server_address This property is required. str
mount_options str
serverAddress This property is required. String
mountOptions String

JobTaskForEachTaskTaskNewClusterDockerImage
, JobTaskForEachTaskTaskNewClusterDockerImageArgs

Url This property is required. string
URL of the Docker image.
BasicAuth JobTaskForEachTaskTaskNewClusterDockerImageBasicAuth
Url This property is required. string
URL of the Docker image.
BasicAuth JobTaskForEachTaskTaskNewClusterDockerImageBasicAuth
url This property is required. String
URL of the Docker image.
basicAuth JobTaskForEachTaskTaskNewClusterDockerImageBasicAuth
url This property is required. string
URL of the Docker image.
basicAuth JobTaskForEachTaskTaskNewClusterDockerImageBasicAuth
url This property is required. str
URL of the Docker image.
basic_auth JobTaskForEachTaskTaskNewClusterDockerImageBasicAuth
url This property is required. String
URL of the Docker image.
basicAuth Property Map

JobTaskForEachTaskTaskNewClusterDockerImageBasicAuth
, JobTaskForEachTaskTaskNewClusterDockerImageBasicAuthArgs

Password This property is required. string
Username This property is required. string
Password This property is required. string
Username This property is required. string
password This property is required. String
username This property is required. String
password This property is required. string
username This property is required. string
password This property is required. str
username This property is required. str
password This property is required. String
username This property is required. String
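
For illustration, a sketch of a per-task cluster built from a custom container image. The registry URL is a placeholder, and the credentials are read from Pulumi config and secrets rather than hard-coded.

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

const cfg = new pulumi.Config();

// Hypothetical sketch: pull a custom image with basic-auth registry credentials.
const dockerJob = new databricks.Job("docker-job", {
    name: "docker_image example",
    tasks: [{
        taskKey: "main",
        newCluster: {
            sparkVersion: "15.4.x-scala2.12", // placeholder
            nodeTypeId: "i3.xlarge",          // placeholder
            numWorkers: 1,
            dockerImage: {
                url: "myregistry.example.com/data/spark-job:1.0.0", // placeholder image URL
                basicAuth: {
                    username: cfg.require("registryUser"),
                    password: cfg.requireSecret("registryPassword"),
                },
            },
        },
        notebookTask: {
            notebookPath: "/Shared/placeholder", // placeholder path
        },
    }],
});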

JobTaskForEachTaskTaskNewClusterGcpAttributes
, JobTaskForEachTaskTaskNewClusterGcpAttributesArgs

JobTaskForEachTaskTaskNewClusterInitScript
, JobTaskForEachTaskTaskNewClusterInitScriptArgs

abfss Property Map
dbfs Property Map

Deprecated: For init scripts use 'volumes', 'workspace' or cloud storage location instead of 'dbfs'.

file Property Map
block consisting of single string fields:
gcs Property Map
s3 Property Map
volumes Property Map
workspace Property Map
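
As a hedged sketch of init scripts on a per-task cluster, using a workspace file and a Unity Catalog volume (all paths are placeholders):

import * as databricks from "@pulumi/databricks";

// Hypothetical sketch: run two init scripts when the task cluster starts.
const initJob = new databricks.Job("init-job", {
    name: "init_scripts example",
    tasks: [{
        taskKey: "main",
        newCluster: {
            sparkVersion: "15.4.x-scala2.12", // placeholder
            nodeTypeId: "i3.xlarge",          // placeholder
            numWorkers: 1,
            initScripts: [
                { workspace: { destination: "/Shared/init/install-deps.sh" } },
                { volumes: { destination: "/Volumes/main/default/scripts/bootstrap.sh" } },
            ],
        },
        notebookTask: {
            notebookPath: "/Shared/placeholder", // placeholder path
        },
    }],
});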

JobTaskForEachTaskTaskNewClusterInitScriptAbfss
, JobTaskForEachTaskTaskNewClusterInitScriptAbfssArgs

Destination This property is required. string
Destination This property is required. string
destination This property is required. String
destination This property is required. string
destination This property is required. str
destination This property is required. String

JobTaskForEachTaskTaskNewClusterInitScriptDbfs
, JobTaskForEachTaskTaskNewClusterInitScriptDbfsArgs

Destination This property is required. string
Destination This property is required. string
destination This property is required. String
destination This property is required. string
destination This property is required. str
destination This property is required. String

JobTaskForEachTaskTaskNewClusterInitScriptFile
, JobTaskForEachTaskTaskNewClusterInitScriptFileArgs

Destination This property is required. string
Destination This property is required. string
destination This property is required. String
destination This property is required. string
destination This property is required. str
destination This property is required. String

JobTaskForEachTaskTaskNewClusterInitScriptGcs
, JobTaskForEachTaskTaskNewClusterInitScriptGcsArgs

Destination This property is required. string
Destination This property is required. string
destination This property is required. String
destination This property is required. string
destination This property is required. str
destination This property is required. String

JobTaskForEachTaskTaskNewClusterInitScriptS3
, JobTaskForEachTaskTaskNewClusterInitScriptS3Args

Destination This property is required. string
CannedAcl string
EnableEncryption bool
EncryptionType string
Endpoint string
KmsKey string
Region string
Destination This property is required. string
CannedAcl string
EnableEncryption bool
EncryptionType string
Endpoint string
KmsKey string
Region string
destination This property is required. String
cannedAcl String
enableEncryption Boolean
encryptionType String
endpoint String
kmsKey String
region String
destination This property is required. string
cannedAcl string
enableEncryption boolean
encryptionType string
endpoint string
kmsKey string
region string
destination This property is required. str
canned_acl str
enable_encryption bool
encryption_type str
endpoint str
kms_key str
region str
destination This property is required. String
cannedAcl String
enableEncryption Boolean
encryptionType String
endpoint String
kmsKey String
region String

JobTaskForEachTaskTaskNewClusterInitScriptVolumes
, JobTaskForEachTaskTaskNewClusterInitScriptVolumesArgs

Destination This property is required. string
Destination This property is required. string
destination This property is required. String
destination This property is required. string
destination This property is required. str
destination This property is required. String

JobTaskForEachTaskTaskNewClusterInitScriptWorkspace
, JobTaskForEachTaskTaskNewClusterInitScriptWorkspaceArgs

Destination This property is required. string
Destination This property is required. string
destination This property is required. String
destination This property is required. string
destination This property is required. str
destination This property is required. String

JobTaskForEachTaskTaskNewClusterLibrary
, JobTaskForEachTaskTaskNewClusterLibraryArgs

JobTaskForEachTaskTaskNewClusterLibraryCran
, JobTaskForEachTaskTaskNewClusterLibraryCranArgs

Package This property is required. string
Repo string
Package This property is required. string
Repo string
package_ This property is required. String
repo String
package This property is required. string
repo string
package This property is required. str
repo str
package This property is required. String
repo String

JobTaskForEachTaskTaskNewClusterLibraryMaven
, JobTaskForEachTaskTaskNewClusterLibraryMavenArgs

Coordinates This property is required. string
Exclusions List<string>
Repo string
Coordinates This property is required. string
Exclusions []string
Repo string
coordinates This property is required. String
exclusions List<String>
repo String
coordinates This property is required. string
exclusions string[]
repo string
coordinates This property is required. str
exclusions Sequence[str]
repo str
coordinates This property is required. String
exclusions List<String>
repo String

JobTaskForEachTaskTaskNewClusterLibraryPypi
, JobTaskForEachTaskTaskNewClusterLibraryPypiArgs

Package This property is required. string
Repo string
Package This property is required. string
Repo string
package_ This property is required. String
repo String
package This property is required. string
repo string
package This property is required. str
repo str
package This property is required. String
repo String

JobTaskForEachTaskTaskNewClusterWorkloadType
, JobTaskForEachTaskTaskNewClusterWorkloadTypeArgs

clients This property is required. Property Map

JobTaskForEachTaskTaskNewClusterWorkloadTypeClients
, JobTaskForEachTaskTaskNewClusterWorkloadTypeClientsArgs

Jobs bool
Notebooks bool
Jobs bool
Notebooks bool
jobs Boolean
notebooks Boolean
jobs boolean
notebooks boolean
jobs bool
notebooks bool
jobs Boolean
notebooks Boolean

JobTaskForEachTaskTaskNotebookTask
, JobTaskForEachTaskTaskNotebookTaskArgs

NotebookPath This property is required. string
The path of the databricks.Notebook to be run in the Databricks workspace or remote repository. For notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash. For notebooks stored in a remote repository, the path must be relative. This field is required.
BaseParameters Dictionary<string, string>
(Map) Base parameters to be used for each run of this job. If the run is initiated by a call to run-now with parameters specified, the two parameters maps will be merged. If the same key is specified in base_parameters and in run-now, the value from run-now will be used. If the notebook takes a parameter that is not specified in the job’s base_parameters or the run-now override parameters, the default value from the notebook will be used. Retrieve these parameters in a notebook using dbutils.widgets.get.
Source string
Location type of the notebook, can only be WORKSPACE or GIT. When set to WORKSPACE, the notebook will be retrieved from the local Databricks workspace. When set to GIT, the notebook will be retrieved from a Git repository defined in git_source. If the value is empty, the task will use GIT if git_source is defined and WORKSPACE otherwise.
WarehouseId string
ID of the SQL warehouse (databricks_sql_endpoint) that will be used to execute the task with a SQL notebook.
NotebookPath This property is required. string
The path of the databricks.Notebook to be run in the Databricks workspace or remote repository. For notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash. For notebooks stored in a remote repository, the path must be relative. This field is required.
BaseParameters map[string]string
(Map) Base parameters to be used for each run of this job. If the run is initiated by a call to run-now with parameters specified, the two parameters maps will be merged. If the same key is specified in base_parameters and in run-now, the value from run-now will be used. If the notebook takes a parameter that is not specified in the job’s base_parameters or the run-now override parameters, the default value from the notebook will be used. Retrieve these parameters in a notebook using dbutils.widgets.get.
Source string
Location type of the notebook, can only be WORKSPACE or GIT. When set to WORKSPACE, the notebook will be retrieved from the local Databricks workspace. When set to GIT, the notebook will be retrieved from a Git repository defined in git_source. If the value is empty, the task will use GIT if git_source is defined and WORKSPACE otherwise.
WarehouseId string
ID of the SQL warehouse (databricks_sql_endpoint) that will be used to execute the task with a SQL notebook.
notebookPath This property is required. String
The path of the databricks.Notebook to be run in the Databricks workspace or remote repository. For notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash. For notebooks stored in a remote repository, the path must be relative. This field is required.
baseParameters Map<String,String>
(Map) Base parameters to be used for each run of this job. If the run is initiated by a call to run-now with parameters specified, the two parameters maps will be merged. If the same key is specified in base_parameters and in run-now, the value from run-now will be used. If the notebook takes a parameter that is not specified in the job’s base_parameters or the run-now override parameters, the default value from the notebook will be used. Retrieve these parameters in a notebook using dbutils.widgets.get.
source String
Location type of the notebook, can only be WORKSPACE or GIT. When set to WORKSPACE, the notebook will be retrieved from the local Databricks workspace. When set to GIT, the notebook will be retrieved from a Git repository defined in git_source. If the value is empty, the task will use GIT if git_source is defined and WORKSPACE otherwise.
warehouseId String
ID of the SQL warehouse (databricks_sql_endpoint) that will be used to execute the task with a SQL notebook.
notebookPath This property is required. string
The path of the databricks.Notebook to be run in the Databricks workspace or remote repository. For notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash. For notebooks stored in a remote repository, the path must be relative. This field is required.
baseParameters {[key: string]: string}
(Map) Base parameters to be used for each run of this job. If the run is initiated by a call to run-now with parameters specified, the two parameters maps will be merged. If the same key is specified in base_parameters and in run-now, the value from run-now will be used. If the notebook takes a parameter that is not specified in the job’s base_parameters or the run-now override parameters, the default value from the notebook will be used. Retrieve these parameters in a notebook using dbutils.widgets.get.
source string
Location type of the notebook, can only be WORKSPACE or GIT. When set to WORKSPACE, the notebook will be retrieved from the local Databricks workspace. When set to GIT, the notebook will be retrieved from a Git repository defined in git_source. If the value is empty, the task will use GIT if git_source is defined and WORKSPACE otherwise.
warehouseId string
ID of the SQL warehouse (databricks_sql_endpoint) that will be used to execute the task with a SQL notebook.
notebook_path This property is required. str
The path of the databricks.Notebook to be run in the Databricks workspace or remote repository. For notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash. For notebooks stored in a remote repository, the path must be relative. This field is required.
base_parameters Mapping[str, str]
(Map) Base parameters to be used for each run of this job. If the run is initiated by a call to run-now with parameters specified, the two parameters maps will be merged. If the same key is specified in base_parameters and in run-now, the value from run-now will be used. If the notebook takes a parameter that is not specified in the job’s base_parameters or the run-now override parameters, the default value from the notebook will be used. Retrieve these parameters in a notebook using dbutils.widgets.get.
source str
Location type of the notebook, can only be WORKSPACE or GIT. When set to WORKSPACE, the notebook will be retrieved from the local Databricks workspace. When set to GIT, the notebook will be retrieved from a Git repository defined in git_source. If the value is empty, the task will use GIT if git_source is defined and WORKSPACE otherwise.
warehouse_id str
ID of the SQL warehouse (databricks_sql_endpoint) that will be used to execute the task with a SQL notebook.
notebookPath This property is required. String
The path of the databricks.Notebook to be run in the Databricks workspace or remote repository. For notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash. For notebooks stored in a remote repository, the path must be relative. This field is required.
baseParameters Map<String>
(Map) Base parameters to be used for each run of this job. If the run is initiated by a call to run-now with parameters specified, the two parameters maps will be merged. If the same key is specified in base_parameters and in run-now, the value from run-now will be used. If the notebook takes a parameter that is not specified in the job’s base_parameters or the run-now override parameters, the default value from the notebook will be used. Retrieve these parameters in a notebook using dbutils.widgets.get.
source String
Location type of the notebook, can only be WORKSPACE or GIT. When set to WORKSPACE, the notebook will be retrieved from the local Databricks workspace. When set to GIT, the notebook will be retrieved from a Git repository defined in git_source. If the value is empty, the task will use GIT if git_source is defined and WORKSPACE otherwise.
warehouseId String
ID of the SQL warehouse (databricks_sql_endpoint) that will be used to execute the task with a SQL notebook.
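
For illustration, here is a minimal sketch of a notebook task that sets baseParameters and targets a workspace notebook. The cluster reference and notebook path are placeholders, not values defined elsewhere on this page.

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

// Minimal sketch of a notebook task. "shared" is assumed to be an existing
// databricks.Cluster; the notebook path is a placeholder.
const notebookJob = new databricks.Job("notebook_job", {
    name: "Notebook task example",
    tasks: [{
        taskKey: "run_notebook",
        existingClusterId: shared.id,
        notebookTask: {
            // Workspace notebooks require an absolute path starting with a slash.
            notebookPath: "/Shared/example-notebook",
            source: "WORKSPACE",
            // Read inside the notebook with dbutils.widgets.get("environment").
            baseParameters: {
                environment: "staging",
            },
        },
    }],
});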

JobTaskForEachTaskTaskNotificationSettings
, JobTaskForEachTaskTaskNotificationSettingsArgs

AlertOnLastAttempt bool
(Bool) do not send notifications to recipients specified in on_start for the retried runs and do not send notifications to recipients specified in on_failure until the last retry of the run.
NoAlertForCanceledRuns bool

(Bool) don't send alerts for cancelled runs.

The following parameter is only available at the task level.

NoAlertForSkippedRuns bool
(Bool) don't send alerts for skipped runs.
AlertOnLastAttempt bool
(Bool) do not send notifications to recipients specified in on_start for the retried runs and do not send notifications to recipients specified in on_failure until the last retry of the run.
NoAlertForCanceledRuns bool

(Bool) don't send alerts for cancelled runs.

The following parameter is only available at the task level.

NoAlertForSkippedRuns bool
(Bool) don't send alerts for skipped runs.
alertOnLastAttempt Boolean
(Bool) do not send notifications to recipients specified in on_start for the retried runs and do not send notifications to recipients specified in on_failure until the last retry of the run.
noAlertForCanceledRuns Boolean

(Bool) don't send alerts for cancelled runs.

The following parameter is only available at the task level.

noAlertForSkippedRuns Boolean
(Bool) don't send alerts for skipped runs.
alertOnLastAttempt boolean
(Bool) do not send notifications to recipients specified in on_start for the retried runs and do not send notifications to recipients specified in on_failure until the last retry of the run.
noAlertForCanceledRuns boolean

(Bool) don't send alerts for cancelled runs.

The following parameter is only available at the task level.

noAlertForSkippedRuns boolean
(Bool) don't send alerts for skipped runs.
alert_on_last_attempt bool
(Bool) do not send notifications to recipients specified in on_start for the retried runs and do not send notifications to recipients specified in on_failure until the last retry of the run.
no_alert_for_canceled_runs bool

(Bool) don't send alerts for cancelled runs.

The following parameter is only available at the task level.

no_alert_for_skipped_runs bool
(Bool) don't send alerts for skipped runs.
alertOnLastAttempt Boolean
(Bool) do not send notifications to recipients specified in on_start for the retried runs and do not send notifications to recipients specified in on_failure until the last retry of the run.
noAlertForCanceledRuns Boolean

(Bool) don't send alerts for cancelled runs.

The following parameter is only available at the task level.

noAlertForSkippedRuns Boolean
(Bool) don't send alerts for skipped runs.
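
As a sketch of how these flags fit into a task, the example below suppresses alerts for cancelled and skipped runs and only notifies on the last retry attempt. The cluster and notebook references are placeholders, and the email notification settings are assumptions added for illustration.

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

// Sketch: tune per-task notification behaviour. "shared" is an assumed cluster.
const quietJob = new databricks.Job("quiet_job", {
    name: "Notification settings example",
    tasks: [{
        taskKey: "main",
        existingClusterId: shared.id,
        notebookTask: {
            notebookPath: "/Shared/example-notebook",
        },
        maxRetries: 2,
        emailNotifications: {
            onFailures: ["ops@example.com"],
        },
        notificationSettings: {
            alertOnLastAttempt: true,      // only alert once the final retry has failed
            noAlertForCanceledRuns: true,  // skip alerts for cancelled runs
            noAlertForSkippedRuns: true,   // only available at the task level
        },
    }],
});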

JobTaskForEachTaskTaskPipelineTask
, JobTaskForEachTaskTaskPipelineTaskArgs

PipelineId This property is required. string
The pipeline's unique ID.
FullRefresh bool

(Bool) Specifies if there should be a full refresh of the pipeline.

The following configuration blocks are only supported inside a task block

PipelineId This property is required. string
The pipeline's unique ID.
FullRefresh bool

(Bool) Specifies if there should be a full refresh of the pipeline.

The following configuration blocks are only supported inside a task block

pipelineId This property is required. String
The pipeline's unique ID.
fullRefresh Boolean

(Bool) Specifies if there should be a full refresh of the pipeline.

The following configuration blocks are only supported inside a task block

pipelineId This property is required. string
The pipeline's unique ID.
fullRefresh boolean

(Bool) Specifies if there should be a full refresh of the pipeline.

The following configuration blocks are only supported inside a task block

pipeline_id This property is required. str
The pipeline's unique ID.
full_refresh bool

(Bool) Specifies if there should be a full refresh of the pipeline.

The following configuration blocks are only supported inside a task block

pipelineId This property is required. String
The pipeline's unique ID.
fullRefresh Boolean

(Bool) Specifies if there should be a full refresh of the pipeline.

The following configuration blocks are only supported inside a task block
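
For illustration, a minimal sketch of a task that triggers a full refresh of a pipeline; thisDatabricksPipeline stands in for a databricks.Pipeline resource defined elsewhere, as in the examples above.

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

// Sketch: run a pipeline update from a job task.
const pipelineJob = new databricks.Job("pipeline_job", {
    name: "Pipeline task example",
    tasks: [{
        taskKey: "refresh_pipeline",
        pipelineTask: {
            pipelineId: thisDatabricksPipeline.id,
            // Recompute all tables instead of performing an incremental update.
            fullRefresh: true,
        },
    }],
});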

JobTaskForEachTaskTaskPythonWheelTask
, JobTaskForEachTaskTaskPythonWheelTaskArgs

EntryPoint string
Python function as entry point for the task
NamedParameters Dictionary<string, string>
Named parameters for the task
PackageName string
Name of Python package
Parameters List<string>
Parameters for the task
EntryPoint string
Python function as entry point for the task
NamedParameters map[string]string
Named parameters for the task
PackageName string
Name of Python package
Parameters []string
Parameters for the task
entryPoint String
Python function as entry point for the task
namedParameters Map<String,String>
Named parameters for the task
packageName String
Name of Python package
parameters List<String>
Parameters for the task
entryPoint string
Python function as entry point for the task
namedParameters {[key: string]: string}
Named parameters for the task
packageName string
Name of Python package
parameters string[]
Parameters for the task
entry_point str
Python function as entry point for the task
named_parameters Mapping[str, str]
Named parameters for the task
package_name str
Name of Python package
parameters Sequence[str]
Parameters for the task
entryPoint String
Python function as entry point for the task
namedParameters Map<String>
Named parameters for the task
packageName String
Name of Python package
parameters List<String>
Parameters for the task
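
For illustration, a minimal sketch of a Python wheel task; the wheel location, package name, and entry point are placeholders, and latest / smallest refer to the data sources used in the examples above.

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

// Sketch: run an entry point from a wheel attached to the task as a library.
const wheelJob = new databricks.Job("wheel_job", {
    name: "Python wheel task example",
    tasks: [{
        taskKey: "run_wheel",
        newCluster: {
            numWorkers: 1,
            sparkVersion: latest.id,
            nodeTypeId: smallest.id,
        },
        libraries: [{
            whl: "dbfs:/FileStore/wheels/my_package-0.1.0-py3-none-any.whl", // placeholder
        }],
        pythonWheelTask: {
            packageName: "my_package", // name of the installed Python package
            entryPoint: "main",        // entry point function declared by the package
            namedParameters: {
                environment: "staging",
            },
        },
    }],
});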

JobTaskForEachTaskTaskRunJobTask
, JobTaskForEachTaskTaskRunJobTaskArgs

JobId This property is required. int
(Integer) ID of the job.
DbtCommands List<string>
JarParams List<string>
JobParameters Dictionary<string, string>
(Map) Job parameters for the task
NotebookParams Dictionary<string, string>
PipelineParams JobTaskForEachTaskTaskRunJobTaskPipelineParams
PythonNamedParams Dictionary<string, string>
PythonParams List<string>
SparkSubmitParams List<string>
SqlParams Dictionary<string, string>
JobId This property is required. int
(Integer) ID of the job.
DbtCommands []string
JarParams []string
JobParameters map[string]string
(Map) Job parameters for the task
NotebookParams map[string]string
PipelineParams JobTaskForEachTaskTaskRunJobTaskPipelineParams
PythonNamedParams map[string]string
PythonParams []string
SparkSubmitParams []string
SqlParams map[string]string
jobId This property is required. Integer
(Integer) ID of the job.
dbtCommands List<String>
jarParams List<String>
jobParameters Map<String,String>
(Map) Job parameters for the task
notebookParams Map<String,String>
pipelineParams JobTaskForEachTaskTaskRunJobTaskPipelineParams
pythonNamedParams Map<String,String>
pythonParams List<String>
sparkSubmitParams List<String>
sqlParams Map<String,String>
jobId This property is required. number
(Integer) ID of the job.
dbtCommands string[]
jarParams string[]
jobParameters {[key: string]: string}
(Map) Job parameters for the task
notebookParams {[key: string]: string}
pipelineParams JobTaskForEachTaskTaskRunJobTaskPipelineParams
pythonNamedParams {[key: string]: string}
pythonParams string[]
sparkSubmitParams string[]
sqlParams {[key: string]: string}
job_id This property is required. int
(Integer) ID of the job.
dbt_commands Sequence[str]
jar_params Sequence[str]
job_parameters Mapping[str, str]
(Map) Job parameters for the task
notebook_params Mapping[str, str]
pipeline_params JobTaskForEachTaskTaskRunJobTaskPipelineParams
python_named_params Mapping[str, str]
python_params Sequence[str]
spark_submit_params Sequence[str]
sql_params Mapping[str, str]
jobId This property is required. Number
(Integer) ID of the job.
dbtCommands List<String>
jarParams List<String>
jobParameters Map<String>
(Map) Job parameters for the task
notebookParams Map<String>
pipelineParams Property Map
pythonNamedParams Map<String>
pythonParams List<String>
sparkSubmitParams List<String>
sqlParams Map<String>
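
For illustration, a minimal sketch of a task that triggers another job and passes job parameters to it; "downstream" stands in for another databricks.Job resource. Since job_id is numeric while the resource ID is a string, the example converts it.

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

// Sketch: orchestrate another job from this one. "downstream" is an assumed databricks.Job.
const orchestrator = new databricks.Job("orchestrator", {
    name: "Run job task example",
    tasks: [{
        taskKey: "trigger_downstream",
        runJobTask: {
            // jobId is numeric, while the resource ID is a string.
            jobId: downstream.id.apply(id => Number(id)),
            jobParameters: {
                run_date: "2025-03-13",
            },
        },
    }],
});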

JobTaskForEachTaskTaskRunJobTaskPipelineParams
, JobTaskForEachTaskTaskRunJobTaskPipelineParamsArgs

FullRefresh bool

(Bool) Specifies if there should be a full refresh of the pipeline.

The following configuration blocks are only supported inside a task block

FullRefresh bool

(Bool) Specifies if there should be a full refresh of the pipeline.

The following configuration blocks are only supported inside a task block

fullRefresh Boolean

(Bool) Specifies if there should be a full refresh of the pipeline.

The following configuration blocks are only supported inside a task block

fullRefresh boolean

(Bool) Specifies if there should be a full refresh of the pipeline.

The following configuration blocks are only supported inside a task block

full_refresh bool

(Bool) Specifies if there should be a full refresh of the pipeline.

The following configuration blocks are only supported inside a task block

fullRefresh Boolean

(Bool) Specifies if there should be a full refresh of the pipeline.

The following configuration blocks are only supported inside a task block

JobTaskForEachTaskTaskSparkJarTask
, JobTaskForEachTaskTaskSparkJarTaskArgs

JarUri string
MainClassName string
The full name of the class containing the main method to be executed. This class must be contained in a JAR provided as a library. The code should use SparkContext.getOrCreate to obtain a Spark context; otherwise, runs of the job will fail.
Parameters List<string>
(List) Parameters passed to the main method.
RunAsRepl bool
JarUri string
MainClassName string
The full name of the class containing the main method to be executed. This class must be contained in a JAR provided as a library. The code should use SparkContext.getOrCreate to obtain a Spark context; otherwise, runs of the job will fail.
Parameters []string
(List) Parameters passed to the main method.
RunAsRepl bool
jarUri String
mainClassName String
The full name of the class containing the main method to be executed. This class must be contained in a JAR provided as a library. The code should use SparkContext.getOrCreate to obtain a Spark context; otherwise, runs of the job will fail.
parameters List<String>
(List) Parameters passed to the main method.
runAsRepl Boolean
jarUri string
mainClassName string
The full name of the class containing the main method to be executed. This class must be contained in a JAR provided as a library. The code should use SparkContext.getOrCreate to obtain a Spark context; otherwise, runs of the job will fail.
parameters string[]
(List) Parameters passed to the main method.
runAsRepl boolean
jar_uri str
main_class_name str
The full name of the class containing the main method to be executed. This class must be contained in a JAR provided as a library. The code should use SparkContext.getOrCreate to obtain a Spark context; otherwise, runs of the job will fail.
parameters Sequence[str]
(List) Parameters passed to the main method.
run_as_repl bool
jarUri String
mainClassName String
The full name of the class containing the main method to be executed. This class must be contained in a JAR provided as a library. The code should use SparkContext.getOrCreate to obtain a Spark context; otherwise, runs of the job will fail.
parameters List<String>
(List) Parameters passed to the main method.
runAsRepl Boolean
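
For illustration, a minimal sketch of a Spark JAR task; the JAR location is a placeholder, and "shared" refers to the cluster used in the examples above.

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

// Sketch: run the main class of a JAR attached to the task as a library.
const jarJob = new databricks.Job("jar_job", {
    name: "Spark JAR task example",
    tasks: [{
        taskKey: "run_jar",
        existingClusterId: shared.id,
        libraries: [{
            jar: "dbfs:/FileStore/jars/app.jar", // placeholder
        }],
        sparkJarTask: {
            // The class should obtain its context via SparkContext.getOrCreate.
            mainClassName: "com.acme.data.Main",
            parameters: ["--date", "2025-03-13"],
        },
    }],
});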

JobTaskForEachTaskTaskSparkPythonTask
, JobTaskForEachTaskTaskSparkPythonTaskArgs

PythonFile This property is required. string
The URI of the Python file to be executed. databricks_dbfs_file, cloud file URIs (e.g. s3:/, abfss:/, gs:/), workspace paths and remote repositories are supported. For Python files stored in the Databricks workspace, the path must be absolute and begin with /Repos. For files stored in a remote repository, the path must be relative. This field is required.
Parameters List<string>
(List) Command line parameters passed to the Python file.
Source string
Location type of the Python file, can only be GIT. When set to GIT, the Python file will be retrieved from a Git repository defined in git_source.
PythonFile This property is required. string
The URI of the Python file to be executed. databricks_dbfs_file, cloud file URIs (e.g. s3:/, abfss:/, gs:/), workspace paths and remote repositories are supported. For Python files stored in the Databricks workspace, the path must be absolute and begin with /Repos. For files stored in a remote repository, the path must be relative. This field is required.
Parameters []string
(List) Command line parameters passed to the Python file.
Source string
Location type of the Python file, can only be GIT. When set to GIT, the Python file will be retrieved from a Git repository defined in git_source.
pythonFile This property is required. String
The URI of the Python file to be executed. databricks_dbfs_file, cloud file URIs (e.g. s3:/, abfss:/, gs:/), workspace paths and remote repositories are supported. For Python files stored in the Databricks workspace, the path must be absolute and begin with /Repos. For files stored in a remote repository, the path must be relative. This field is required.
parameters List<String>
(List) Command line parameters passed to the Python file.
source String
Location type of the Python file, can only be GIT. When set to GIT, the Python file will be retrieved from a Git repository defined in git_source.
pythonFile This property is required. string
The URI of the Python file to be executed. databricks_dbfs_file, cloud file URIs (e.g. s3:/, abfss:/, gs:/), workspace paths and remote repositories are supported. For Python files stored in the Databricks workspace, the path must be absolute and begin with /Repos. For files stored in a remote repository, the path must be relative. This field is required.
parameters string[]
(List) Command line parameters passed to the Python file.
source string
Location type of the Python file, can only be GIT. When set to GIT, the Python file will be retrieved from a Git repository defined in git_source.
python_file This property is required. str
The URI of the Python file to be executed. databricks_dbfs_file, cloud file URIs (e.g. s3:/, abfss:/, gs:/), workspace paths and remote repositories are supported. For Python files stored in the Databricks workspace, the path must be absolute and begin with /Repos. For files stored in a remote repository, the path must be relative. This field is required.
parameters Sequence[str]
(List) Command line parameters passed to the Python file.
source str
Location type of the Python file, can only be GIT. When set to GIT, the Python file will be retrieved from a Git repository defined in git_source.
pythonFile This property is required. String
The URI of the Python file to be executed. databricks_dbfs_file, cloud file URIs (e.g. s3:/, abfss:/, gs:/), workspace paths and remote repositories are supported. For Python files stored in the Databricks workspace, the path must be absolute and begin with /Repos. For files stored in a remote repository, the path must be relative. This field is required.
parameters List<String>
(List) Command line parameters passed to the Python file.
source String
Location type of the Python file, can only be GIT. When set to GIT, the Python file will be retrieved from a Git repository defined in git_source.
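
For illustration, a minimal sketch of a Spark Python task that pulls the file from a Git repository defined in git_source; the repository URL, branch, and file path are placeholders, and "shared" refers to the cluster used in the examples above.

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

// Sketch: run a Python file from a remote repository. The repository details are placeholders.
const pyJob = new databricks.Job("py_job", {
    name: "Spark Python task example",
    gitSource: {
        url: "https://github.com/acme/data-jobs",
        provider: "gitHub",
        branch: "main",
    },
    tasks: [{
        taskKey: "run_script",
        existingClusterId: shared.id,
        sparkPythonTask: {
            pythonFile: "jobs/etl.py", // relative path, because source is GIT
            source: "GIT",
            parameters: ["--env", "staging"],
        },
    }],
});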

JobTaskForEachTaskTaskSparkSubmitTask
, JobTaskForEachTaskTaskSparkSubmitTaskArgs

Parameters List<string>
(List) Command-line parameters passed to spark submit.
Parameters []string
(List) Command-line parameters passed to spark submit.
parameters List<String>
(List) Command-line parameters passed to spark submit.
parameters string[]
(List) Command-line parameters passed to spark submit.
parameters Sequence[str]
(List) Command-line parameters passed to spark submit.
parameters List<String>
(List) Command-line parameters passed to spark submit.
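
For illustration, a minimal sketch of a spark-submit task; the artifact path is a placeholder, and latest / smallest refer to the data sources used in the examples above.

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

// Sketch: pass spark-submit style arguments as a single list of strings.
const submitJob = new databricks.Job("submit_job", {
    name: "Spark submit task example",
    tasks: [{
        taskKey: "submit",
        newCluster: {
            numWorkers: 2,
            sparkVersion: latest.id,
            nodeTypeId: smallest.id,
        },
        sparkSubmitTask: {
            parameters: [
                "--class", "com.acme.data.Main",
                "dbfs:/FileStore/jars/app.jar", // placeholder artifact
                "--date", "2025-03-13",
            ],
        },
    }],
});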

JobTaskForEachTaskTaskSqlTask
, JobTaskForEachTaskTaskSqlTaskArgs

WarehouseId This property is required. string
ID of the SQL warehouse (databricks_sql_endpoint) that will be used to execute the task. Only Serverless & Pro warehouses are currently supported.
Alert JobTaskForEachTaskTaskSqlTaskAlert
block consisting of the following fields:
Dashboard JobTaskForEachTaskTaskSqlTaskDashboard
block consisting of the following fields:
File JobTaskForEachTaskTaskSqlTaskFile
block consisting of the following single string fields:
Parameters Dictionary<string, string>
(Map) parameters to be used for each run of this task. The SQL alert task does not support custom parameters.
Query JobTaskForEachTaskTaskSqlTaskQuery
block consisting of a single string field: query_id - identifier of the Databricks Query (databricks_query).
WarehouseId This property is required. string
ID of the SQL warehouse (databricks_sql_endpoint) that will be used to execute the task. Only Serverless & Pro warehouses are currently supported.
Alert JobTaskForEachTaskTaskSqlTaskAlert
block consisting of the following fields:
Dashboard JobTaskForEachTaskTaskSqlTaskDashboard
block consisting of the following fields:
File JobTaskForEachTaskTaskSqlTaskFile
block consisting of the following single string fields:
Parameters map[string]string
(Map) parameters to be used for each run of this task. The SQL alert task does not support custom parameters.
Query JobTaskForEachTaskTaskSqlTaskQuery
block consisting of a single string field: query_id - identifier of the Databricks Query (databricks_query).
warehouseId This property is required. String
ID of the SQL warehouse (databricks_sql_endpoint) that will be used to execute the task. Only Serverless & Pro warehouses are currently supported.
alert JobTaskForEachTaskTaskSqlTaskAlert
block consisting of the following fields:
dashboard JobTaskForEachTaskTaskSqlTaskDashboard
block consisting of the following fields:
file JobTaskForEachTaskTaskSqlTaskFile
block consisting of the following single string fields:
parameters Map<String,String>
(Map) parameters to be used for each run of this task. The SQL alert task does not support custom parameters.
query JobTaskForEachTaskTaskSqlTaskQuery
block consisting of a single string field: query_id - identifier of the Databricks Query (databricks_query).
warehouseId This property is required. string
ID of the SQL warehouse (databricks_sql_endpoint) that will be used to execute the task. Only Serverless & Pro warehouses are currently supported.
alert JobTaskForEachTaskTaskSqlTaskAlert
block consisting of the following fields:
dashboard JobTaskForEachTaskTaskSqlTaskDashboard
block consisting of the following fields:
file JobTaskForEachTaskTaskSqlTaskFile
block consisting of the following single string fields:
parameters {[key: string]: string}
(Map) parameters to be used for each run of this task. The SQL alert task does not support custom parameters.
query JobTaskForEachTaskTaskSqlTaskQuery
block consisting of a single string field: query_id - identifier of the Databricks Query (databricks_query).
warehouse_id This property is required. str
ID of the SQL warehouse (databricks_sql_endpoint) that will be used to execute the task. Only Serverless & Pro warehouses are currently supported.
alert JobTaskForEachTaskTaskSqlTaskAlert
block consisting of the following fields:
dashboard JobTaskForEachTaskTaskSqlTaskDashboard
block consisting of the following fields:
file JobTaskForEachTaskTaskSqlTaskFile
block consisting of the following single string fields:
parameters Mapping[str, str]
(Map) parameters to be used for each run of this task. The SQL alert task does not support custom parameters.
query JobTaskForEachTaskTaskSqlTaskQuery
block consisting of a single string field: query_id - identifier of the Databricks Query (databricks_query).
warehouseId This property is required. String
ID of the SQL warehouse (databricks_sql_endpoint) that will be used to execute the task. Only Serverless & Pro warehouses are currently supported.
alert Property Map
block consisting of the following fields:
dashboard Property Map
block consisting of the following fields:
file Property Map
block consisting of the following single string fields:
parameters Map<String>
(Map) parameters to be used for each run of this task. The SQL alert task does not support custom parameters.
query Property Map
block consisting of a single string field: query_id - identifier of the Databricks Query (databricks_query).
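
The examples further down this page show the query, dashboard, and alert variants; for completeness, here is a minimal sketch of the file variant executing a workspace SQL file on a SQL warehouse. sqlJobWarehouse stands in for a SQL warehouse resource defined elsewhere, and the file path is a placeholder.

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

// Sketch: run a SQL file stored in the workspace on a SQL warehouse.
const sqlFileJob = new databricks.Job("sql_file_job", {
    name: "SQL file task example",
    tasks: [{
        taskKey: "run_sql_file",
        sqlTask: {
            warehouseId: sqlJobWarehouse.id,
            file: {
                path: "/Shared/queries/daily_aggregation.sql", // absolute workspace path
                source: "WORKSPACE",
            },
            parameters: {
                run_date: "2025-03-13",
            },
        },
    }],
});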

JobTaskForEachTaskTaskSqlTaskAlert
, JobTaskForEachTaskTaskSqlTaskAlertArgs

AlertId This property is required. string
(String) identifier of the Databricks Alert (databricks_alert).
PauseSubscriptions bool
flag that specifies if subscriptions are paused or not.
Subscriptions List<JobTaskForEachTaskTaskSqlTaskAlertSubscription>
a list of subscription blocks consisting of one of the required fields: user_name for user emails or destination_id for the Alert destination's identifier.
AlertId This property is required. string
(String) identifier of the Databricks Alert (databricks_alert).
PauseSubscriptions bool
flag that specifies if subscriptions are paused or not.
Subscriptions []JobTaskForEachTaskTaskSqlTaskAlertSubscription
a list of subscription blocks consisting of one of the required fields: user_name for user emails or destination_id for the Alert destination's identifier.
alertId This property is required. String
(String) identifier of the Databricks Alert (databricks_alert).
pauseSubscriptions Boolean
flag that specifies if subscriptions are paused or not.
subscriptions List<JobTaskForEachTaskTaskSqlTaskAlertSubscription>
a list of subscription blocks consisting of one of the required fields: user_name for user emails or destination_id for the Alert destination's identifier.
alertId This property is required. string
(String) identifier of the Databricks Alert (databricks_alert).
pauseSubscriptions boolean
flag that specifies if subscriptions are paused or not.
subscriptions JobTaskForEachTaskTaskSqlTaskAlertSubscription[]
a list of subscription blocks consisting of one of the required fields: user_name for user emails or destination_id for the Alert destination's identifier.
alert_id This property is required. str
(String) identifier of the Databricks Alert (databricks_alert).
pause_subscriptions bool
flag that specifies if subscriptions are paused or not.
subscriptions Sequence[JobTaskForEachTaskTaskSqlTaskAlertSubscription]
a list of subscription blocks consisting of one of the required fields: user_name for user emails or destination_id for the Alert destination's identifier.
alertId This property is required. String
(String) identifier of the Databricks Alert (databricks_alert).
pauseSubscriptions Boolean
flag that specifies if subscriptions are paused or not.
subscriptions List<Property Map>
a list of subscription blocks consisting of one of the required fields: user_name for user emails or destination_id for the Alert destination's identifier.

JobTaskForEachTaskTaskSqlTaskAlertSubscription
, JobTaskForEachTaskTaskSqlTaskAlertSubscriptionArgs

DestinationId string
UserName string
The email of an active workspace user. Non-admin users can only set this field to their own email.
DestinationId string
UserName string
The email of an active workspace user. Non-admin users can only set this field to their own email.
destinationId String
userName String
The email of an active workspace user. Non-admin users can only set this field to their own email.
destinationId string
userName string
The email of an active workspace user. Non-admin users can only set this field to their own email.
destination_id str
user_name str
The email of an active workspace user. Non-admin users can only set this field to their own email.
destinationId String
userName String
The email of an active workspace user. Non-admin users can only set this field to their own email.

JobTaskForEachTaskTaskSqlTaskDashboard
, JobTaskForEachTaskTaskSqlTaskDashboardArgs

DashboardId This property is required. string
(String) identifier of the Databricks SQL Dashboard databricks_sql_dashboard.
CustomSubject string
string specifying a custom subject for the email sent.
PauseSubscriptions bool
flag that specifies if subscriptions are paused or not.
Subscriptions List<JobTaskForEachTaskTaskSqlTaskDashboardSubscription>
a list of subscription blocks consisting of one of the required fields: user_name for user emails or destination_id for the Alert destination's identifier.
DashboardId This property is required. string
(String) identifier of the Databricks SQL Dashboard databricks_sql_dashboard.
CustomSubject string
string specifying a custom subject for the email sent.
PauseSubscriptions bool
flag that specifies if subscriptions are paused or not.
Subscriptions []JobTaskForEachTaskTaskSqlTaskDashboardSubscription
a list of subscription blocks consisting of one of the required fields: user_name for user emails or destination_id for the Alert destination's identifier.
dashboardId This property is required. String
(String) identifier of the Databricks SQL Dashboard databricks_sql_dashboard.
customSubject String
string specifying a custom subject for the email sent.
pauseSubscriptions Boolean
flag that specifies if subscriptions are paused or not.
subscriptions List<JobTaskForEachTaskTaskSqlTaskDashboardSubscription>
a list of subscription blocks consisting of one of the required fields: user_name for user emails or destination_id for the Alert destination's identifier.
dashboardId This property is required. string
(String) identifier of the Databricks SQL Dashboard databricks_sql_dashboard.
customSubject string
string specifying a custom subject for the email sent.
pauseSubscriptions boolean
flag that specifies if subscriptions are paused or not.
subscriptions JobTaskForEachTaskTaskSqlTaskDashboardSubscription[]
a list of subscription blocks consisting of one of the required fields: user_name for user emails or destination_id for the Alert destination's identifier.
dashboard_id This property is required. str
(String) identifier of the Databricks SQL Dashboard databricks_sql_dashboard.
custom_subject str
string specifying a custom subject for the email sent.
pause_subscriptions bool
flag that specifies if subscriptions are paused or not.
subscriptions Sequence[JobTaskForEachTaskTaskSqlTaskDashboardSubscription]
a list of subscription blocks consisting of one of the required fields: user_name for user emails or destination_id for the Alert destination's identifier.
dashboardId This property is required. String
(String) identifier of the Databricks SQL Dashboard databricks_sql_dashboard.
customSubject String
string specifying a custom subject for the email sent.
pauseSubscriptions Boolean
flag that specifies if subscriptions are paused or not.
subscriptions List<Property Map>
a list of subscription blocks consisting of one of the required fields: user_name for user emails or destination_id for the Alert destination's identifier.

JobTaskForEachTaskTaskSqlTaskDashboardSubscription
, JobTaskForEachTaskTaskSqlTaskDashboardSubscriptionArgs

DestinationId string
UserName string
The email of an active workspace user. Non-admin users can only set this field to their own email.
DestinationId string
UserName string
The email of an active workspace user. Non-admin users can only set this field to their own email.
destinationId String
userName String
The email of an active workspace user. Non-admin users can only set this field to their own email.
destinationId string
userName string
The email of an active workspace user. Non-admin users can only set this field to their own email.
destination_id str
user_name str
The email of an active workspace user. Non-admin users can only set this field to their own email.
destinationId String
userName String
The email of an active workspace user. Non-admin users can only set this field to their own email.

JobTaskForEachTaskTaskSqlTaskFile
, JobTaskForEachTaskTaskSqlTaskFileArgs

Path This property is required. string

If source is GIT: Relative path to the file in the repository specified in the git_source block with SQL commands to execute. If source is WORKSPACE: Absolute path to the file in the workspace with SQL commands to execute.

Example

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

const sqlAggregationJob = new databricks.Job("sql_aggregation_job", {
    name: "Example SQL Job",
    tasks: [
        {
            taskKey: "run_agg_query",
            sqlTask: {
                warehouseId: sqlJobWarehouse.id,
                query: {
                    queryId: aggQuery.id,
                },
            },
        },
        {
            taskKey: "run_dashboard",
            sqlTask: {
                warehouseId: sqlJobWarehouse.id,
                dashboard: {
                    dashboardId: dash.id,
                    subscriptions: [{
                        userName: "user@domain.com",
                    }],
                },
            },
        },
        {
            taskKey: "run_alert",
            sqlTask: {
                warehouseId: sqlJobWarehouse.id,
                alert: {
                    alertId: alert.id,
                    subscriptions: [{
                        userName: "user@domain.com",
                    }],
                },
            },
        },
    ],
});

import pulumi
import pulumi_databricks as databricks

sql_aggregation_job = databricks.Job("sql_aggregation_job",
    name="Example SQL Job",
    tasks=[
        {
            "task_key": "run_agg_query",
            "sql_task": {
                "warehouse_id": sql_job_warehouse["id"],
                "query": {
                    "query_id": agg_query["id"],
                },
            },
        },
        {
            "task_key": "run_dashboard",
            "sql_task": {
                "warehouse_id": sql_job_warehouse["id"],
                "dashboard": {
                    "dashboard_id": dash["id"],
                    "subscriptions": [{
                        "user_name": "user@domain.com",
                    }],
                },
            },
        },
        {
            "task_key": "run_alert",
            "sql_task": {
                "warehouse_id": sql_job_warehouse["id"],
                "alert": {
                    "alert_id": alert["id"],
                    "subscriptions": [{
                        "user_name": "user@domain.com",
                    }],
                },
            },
        },
    ])
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;

return await Deployment.RunAsync(() => 
{
    var sqlAggregationJob = new Databricks.Job("sql_aggregation_job", new()
    {
        Name = "Example SQL Job",
        Tasks = new[]
        {
            new Databricks.Inputs.JobTaskArgs
            {
                TaskKey = "run_agg_query",
                SqlTask = new Databricks.Inputs.JobTaskSqlTaskArgs
                {
                    WarehouseId = sqlJobWarehouse.Id,
                    Query = new Databricks.Inputs.JobTaskSqlTaskQueryArgs
                    {
                        QueryId = aggQuery.Id,
                    },
                },
            },
            new Databricks.Inputs.JobTaskArgs
            {
                TaskKey = "run_dashboard",
                SqlTask = new Databricks.Inputs.JobTaskSqlTaskArgs
                {
                    WarehouseId = sqlJobWarehouse.Id,
                    Dashboard = new Databricks.Inputs.JobTaskSqlTaskDashboardArgs
                    {
                        DashboardId = dash.Id,
                        Subscriptions = new[]
                        {
                            new Databricks.Inputs.JobTaskSqlTaskDashboardSubscriptionArgs
                            {
                                UserName = "user@domain.com",
                            },
                        },
                    },
                },
            },
            new Databricks.Inputs.JobTaskArgs
            {
                TaskKey = "run_alert",
                SqlTask = new Databricks.Inputs.JobTaskSqlTaskArgs
                {
                    WarehouseId = sqlJobWarehouse.Id,
                    Alert = new Databricks.Inputs.JobTaskSqlTaskAlertArgs
                    {
                        AlertId = alert.Id,
                        Subscriptions = new[]
                        {
                            new Databricks.Inputs.JobTaskSqlTaskAlertSubscriptionArgs
                            {
                                UserName = "user@domain.com",
                            },
                        },
                    },
                },
            },
        },
    });

});
package main

import (
	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := databricks.NewJob(ctx, "sql_aggregation_job", &databricks.JobArgs{
			Name: pulumi.String("Example SQL Job"),
			Tasks: databricks.JobTaskArray{
				&databricks.JobTaskArgs{
					TaskKey: pulumi.String("run_agg_query"),
					SqlTask: &databricks.JobTaskSqlTaskArgs{
						WarehouseId: pulumi.Any(sqlJobWarehouse.Id),
						Query: &databricks.JobTaskSqlTaskQueryArgs{
							QueryId: pulumi.Any(aggQuery.Id),
						},
					},
				},
				&databricks.JobTaskArgs{
					TaskKey: pulumi.String("run_dashboard"),
					SqlTask: &databricks.JobTaskSqlTaskArgs{
						WarehouseId: pulumi.Any(sqlJobWarehouse.Id),
						Dashboard: &databricks.JobTaskSqlTaskDashboardArgs{
							DashboardId: pulumi.Any(dash.Id),
							Subscriptions: databricks.JobTaskSqlTaskDashboardSubscriptionArray{
								&databricks.JobTaskSqlTaskDashboardSubscriptionArgs{
									UserName: pulumi.String("user@domain.com"),
								},
							},
						},
					},
				},
				&databricks.JobTaskArgs{
					TaskKey: pulumi.String("run_alert"),
					SqlTask: &databricks.JobTaskSqlTaskArgs{
						WarehouseId: pulumi.Any(sqlJobWarehouse.Id),
						Alert: &databricks.JobTaskSqlTaskAlertArgs{
							AlertId: pulumi.Any(alert.Id),
							Subscriptions: databricks.JobTaskSqlTaskAlertSubscriptionArray{
								&databricks.JobTaskSqlTaskAlertSubscriptionArgs{
									UserName: pulumi.String("user@domain.com"),
								},
							},
						},
					},
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.Job;
import com.pulumi.databricks.JobArgs;
import com.pulumi.databricks.inputs.JobTaskArgs;
import com.pulumi.databricks.inputs.JobTaskSqlTaskArgs;
import com.pulumi.databricks.inputs.JobTaskSqlTaskQueryArgs;
import com.pulumi.databricks.inputs.JobTaskSqlTaskDashboardArgs;
import com.pulumi.databricks.inputs.JobTaskSqlTaskAlertArgs;
import com.pulumi.databricks.inputs.JobTaskSqlTaskDashboardSubscriptionArgs;
import com.pulumi.databricks.inputs.JobTaskSqlTaskAlertSubscriptionArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var sqlAggregationJob = new Job("sqlAggregationJob", JobArgs.builder()
            .name("Example SQL Job")
            .tasks(            
                JobTaskArgs.builder()
                    .taskKey("run_agg_query")
                    .sqlTask(JobTaskSqlTaskArgs.builder()
                        .warehouseId(sqlJobWarehouse.id())
                        .query(JobTaskSqlTaskQueryArgs.builder()
                            .queryId(aggQuery.id())
                            .build())
                        .build())
                    .build(),
                JobTaskArgs.builder()
                    .taskKey("run_dashboard")
                    .sqlTask(JobTaskSqlTaskArgs.builder()
                        .warehouseId(sqlJobWarehouse.id())
                        .dashboard(JobTaskSqlTaskDashboardArgs.builder()
                            .dashboardId(dash.id())
                            .subscriptions(JobTaskSqlTaskDashboardSubscriptionArgs.builder()
                                .userName("user@domain.com")
                                .build())
                            .build())
                        .build())
                    .build(),
                JobTaskArgs.builder()
                    .taskKey("run_alert")
                    .sqlTask(JobTaskSqlTaskArgs.builder()
                        .warehouseId(sqlJobWarehouse.id())
                        .alert(JobTaskSqlTaskAlertArgs.builder()
                            .alertId(alert.id())
                            .subscriptions(JobTaskSqlTaskAlertSubscriptionArgs.builder()
                                .userName("user@domain.com")
                                .build())
                            .build())
                        .build())
                    .build())
            .build());

    }
}
resources:
  sqlAggregationJob:
    type: databricks:Job
    name: sql_aggregation_job
    properties:
      name: Example SQL Job
      tasks:
        - taskKey: run_agg_query
          sqlTask:
            warehouseId: ${sqlJobWarehouse.id}
            query:
              queryId: ${aggQuery.id}
        - taskKey: run_dashboard
          sqlTask:
            warehouseId: ${sqlJobWarehouse.id}
            dashboard:
              dashboardId: ${dash.id}
              subscriptions:
                - userName: user@domain.com
        - taskKey: run_alert
          sqlTask:
            warehouseId: ${sqlJobWarehouse.id}
            alert:
              alertId: ${alert.id}
              subscriptions:
                - userName: user@domain.com
Source string
The source of the project. Possible values are WORKSPACE and GIT.
Path This property is required. string

If source is GIT: Relative path to the file in the repository specified in the git_source block with SQL commands to execute. If source is WORKSPACE: Absolute path to the file in the workspace with SQL commands to execute.

Example

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

const sqlAggregationJob = new databricks.Job("sql_aggregation_job", {
    name: "Example SQL Job",
    tasks: [
        {
            taskKey: "run_agg_query",
            sqlTask: {
                warehouseId: sqlJobWarehouse.id,
                query: {
                    queryId: aggQuery.id,
                },
            },
        },
        {
            taskKey: "run_dashboard",
            sqlTask: {
                warehouseId: sqlJobWarehouse.id,
                dashboard: {
                    dashboardId: dash.id,
                    subscriptions: [{
                        userName: "user@domain.com",
                    }],
                },
            },
        },
        {
            taskKey: "run_alert",
            sqlTask: {
                warehouseId: sqlJobWarehouse.id,
                alert: {
                    alertId: alert.id,
                    subscriptions: [{
                        userName: "user@domain.com",
                    }],
                },
            },
        },
    ],
});

import pulumi
import pulumi_databricks as databricks

sql_aggregation_job = databricks.Job("sql_aggregation_job",
    name="Example SQL Job",
    tasks=[
        {
            "task_key": "run_agg_query",
            "sql_task": {
                "warehouse_id": sql_job_warehouse["id"],
                "query": {
                    "query_id": agg_query["id"],
                },
            },
        },
        {
            "task_key": "run_dashboard",
            "sql_task": {
                "warehouse_id": sql_job_warehouse["id"],
                "dashboard": {
                    "dashboard_id": dash["id"],
                    "subscriptions": [{
                        "user_name": "user@domain.com",
                    }],
                },
            },
        },
        {
            "task_key": "run_alert",
            "sql_task": {
                "warehouse_id": sql_job_warehouse["id"],
                "alert": {
                    "alert_id": alert["id"],
                    "subscriptions": [{
                        "user_name": "user@domain.com",
                    }],
                },
            },
        },
    ])
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;

return await Deployment.RunAsync(() => 
{
    var sqlAggregationJob = new Databricks.Job("sql_aggregation_job", new()
    {
        Name = "Example SQL Job",
        Tasks = new[]
        {
            new Databricks.Inputs.JobTaskArgs
            {
                TaskKey = "run_agg_query",
                SqlTask = new Databricks.Inputs.JobTaskSqlTaskArgs
                {
                    WarehouseId = sqlJobWarehouse.Id,
                    Query = new Databricks.Inputs.JobTaskSqlTaskQueryArgs
                    {
                        QueryId = aggQuery.Id,
                    },
                },
            },
            new Databricks.Inputs.JobTaskArgs
            {
                TaskKey = "run_dashboard",
                SqlTask = new Databricks.Inputs.JobTaskSqlTaskArgs
                {
                    WarehouseId = sqlJobWarehouse.Id,
                    Dashboard = new Databricks.Inputs.JobTaskSqlTaskDashboardArgs
                    {
                        DashboardId = dash.Id,
                        Subscriptions = new[]
                        {
                            new Databricks.Inputs.JobTaskSqlTaskDashboardSubscriptionArgs
                            {
                                UserName = "user@domain.com",
                            },
                        },
                    },
                },
            },
            new Databricks.Inputs.JobTaskArgs
            {
                TaskKey = "run_alert",
                SqlTask = new Databricks.Inputs.JobTaskSqlTaskArgs
                {
                    WarehouseId = sqlJobWarehouse.Id,
                    Alert = new Databricks.Inputs.JobTaskSqlTaskAlertArgs
                    {
                        AlertId = alert.Id,
                        Subscriptions = new[]
                        {
                            new Databricks.Inputs.JobTaskSqlTaskAlertSubscriptionArgs
                            {
                                UserName = "user@domain.com",
                            },
                        },
                    },
                },
            },
        },
    });

});
package main

import (
	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := databricks.NewJob(ctx, "sql_aggregation_job", &databricks.JobArgs{
			Name: pulumi.String("Example SQL Job"),
			Tasks: databricks.JobTaskArray{
				&databricks.JobTaskArgs{
					TaskKey: pulumi.String("run_agg_query"),
					SqlTask: &databricks.JobTaskSqlTaskArgs{
						WarehouseId: pulumi.Any(sqlJobWarehouse.Id),
						Query: &databricks.JobTaskSqlTaskQueryArgs{
							QueryId: pulumi.Any(aggQuery.Id),
						},
					},
				},
				&databricks.JobTaskArgs{
					TaskKey: pulumi.String("run_dashboard"),
					SqlTask: &databricks.JobTaskSqlTaskArgs{
						WarehouseId: pulumi.Any(sqlJobWarehouse.Id),
						Dashboard: &databricks.JobTaskSqlTaskDashboardArgs{
							DashboardId: pulumi.Any(dash.Id),
							Subscriptions: databricks.JobTaskSqlTaskDashboardSubscriptionArray{
								&databricks.JobTaskSqlTaskDashboardSubscriptionArgs{
									UserName: pulumi.String("user@domain.com"),
								},
							},
						},
					},
				},
				&databricks.JobTaskArgs{
					TaskKey: pulumi.String("run_alert"),
					SqlTask: &databricks.JobTaskSqlTaskArgs{
						WarehouseId: pulumi.Any(sqlJobWarehouse.Id),
						Alert: &databricks.JobTaskSqlTaskAlertArgs{
							AlertId: pulumi.Any(alert.Id),
							Subscriptions: databricks.JobTaskSqlTaskAlertSubscriptionArray{
								&databricks.JobTaskSqlTaskAlertSubscriptionArgs{
									UserName: pulumi.String("user@domain.com"),
								},
							},
						},
					},
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.Job;
import com.pulumi.databricks.JobArgs;
import com.pulumi.databricks.inputs.JobTaskArgs;
import com.pulumi.databricks.inputs.JobTaskSqlTaskArgs;
import com.pulumi.databricks.inputs.JobTaskSqlTaskQueryArgs;
import com.pulumi.databricks.inputs.JobTaskSqlTaskDashboardArgs;
import com.pulumi.databricks.inputs.JobTaskSqlTaskAlertArgs;
import com.pulumi.databricks.inputs.JobTaskSqlTaskDashboardSubscriptionArgs;
import com.pulumi.databricks.inputs.JobTaskSqlTaskAlertSubscriptionArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var sqlAggregationJob = new Job("sqlAggregationJob", JobArgs.builder()
            .name("Example SQL Job")
            .tasks(            
                JobTaskArgs.builder()
                    .taskKey("run_agg_query")
                    .sqlTask(JobTaskSqlTaskArgs.builder()
                        .warehouseId(sqlJobWarehouse.id())
                        .query(JobTaskSqlTaskQueryArgs.builder()
                            .queryId(aggQuery.id())
                            .build())
                        .build())
                    .build(),
                JobTaskArgs.builder()
                    .taskKey("run_dashboard")
                    .sqlTask(JobTaskSqlTaskArgs.builder()
                        .warehouseId(sqlJobWarehouse.id())
                        .dashboard(JobTaskSqlTaskDashboardArgs.builder()
                            .dashboardId(dash.id())
                            .subscriptions(JobTaskSqlTaskDashboardSubscriptionArgs.builder()
                                .userName("user@domain.com")
                                .build())
                            .build())
                        .build())
                    .build(),
                JobTaskArgs.builder()
                    .taskKey("run_alert")
                    .sqlTask(JobTaskSqlTaskArgs.builder()
                        .warehouseId(sqlJobWarehouse.id())
                        .alert(JobTaskSqlTaskAlertArgs.builder()
                            .alertId(alert.id())
                            .subscriptions(JobTaskSqlTaskAlertSubscriptionArgs.builder()
                                .userName("user@domain.com")
                                .build())
                            .build())
                        .build())
                    .build())
            .build());

    }
}
resources:
  sqlAggregationJob:
    type: databricks:Job
    name: sql_aggregation_job
    properties:
      name: Example SQL Job
      tasks:
        - taskKey: run_agg_query
          sqlTask:
            warehouseId: ${sqlJobWarehouse.id}
            query:
              queryId: ${aggQuery.id}
        - taskKey: run_dashboard
          sqlTask:
            warehouseId: ${sqlJobWarehouse.id}
            dashboard:
              dashboardId: ${dash.id}
              subscriptions:
                - userName: user@domain.com
        - taskKey: run_alert
          sqlTask:
            warehouseId: ${sqlJobWarehouse.id}
            alert:
              alertId: ${alert.id}
              subscriptions:
                - userName: user@domain.com
Source string
The source of the project. Possible values are WORKSPACE and GIT.
path This property is required. String

If source is GIT: Relative path to the file in the repository specified in the git_source block with SQL commands to execute. If source is WORKSPACE: Absolute path to the file in the workspace with SQL commands to execute.

Example

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

const sqlAggregationJob = new databricks.Job("sql_aggregation_job", {
    name: "Example SQL Job",
    tasks: [
        {
            taskKey: "run_agg_query",
            sqlTask: {
                warehouseId: sqlJobWarehouse.id,
                query: {
                    queryId: aggQuery.id,
                },
            },
        },
        {
            taskKey: "run_dashboard",
            sqlTask: {
                warehouseId: sqlJobWarehouse.id,
                dashboard: {
                    dashboardId: dash.id,
                    subscriptions: [{
                        userName: "user@domain.com",
                    }],
                },
            },
        },
        {
            taskKey: "run_alert",
            sqlTask: {
                warehouseId: sqlJobWarehouse.id,
                alert: {
                    alertId: alert.id,
                    subscriptions: [{
                        userName: "user@domain.com",
                    }],
                },
            },
        },
    ],
});

import pulumi
import pulumi_databricks as databricks

sql_aggregation_job = databricks.Job("sql_aggregation_job",
    name="Example SQL Job",
    tasks=[
        {
            "task_key": "run_agg_query",
            "sql_task": {
                "warehouse_id": sql_job_warehouse["id"],
                "query": {
                    "query_id": agg_query["id"],
                },
            },
        },
        {
            "task_key": "run_dashboard",
            "sql_task": {
                "warehouse_id": sql_job_warehouse["id"],
                "dashboard": {
                    "dashboard_id": dash["id"],
                    "subscriptions": [{
                        "user_name": "user@domain.com",
                    }],
                },
            },
        },
        {
            "task_key": "run_alert",
            "sql_task": {
                "warehouse_id": sql_job_warehouse["id"],
                "alert": {
                    "alert_id": alert["id"],
                    "subscriptions": [{
                        "user_name": "user@domain.com",
                    }],
                },
            },
        },
    ])
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;

return await Deployment.RunAsync(() => 
{
    var sqlAggregationJob = new Databricks.Job("sql_aggregation_job", new()
    {
        Name = "Example SQL Job",
        Tasks = new[]
        {
            new Databricks.Inputs.JobTaskArgs
            {
                TaskKey = "run_agg_query",
                SqlTask = new Databricks.Inputs.JobTaskSqlTaskArgs
                {
                    WarehouseId = sqlJobWarehouse.Id,
                    Query = new Databricks.Inputs.JobTaskSqlTaskQueryArgs
                    {
                        QueryId = aggQuery.Id,
                    },
                },
            },
            new Databricks.Inputs.JobTaskArgs
            {
                TaskKey = "run_dashboard",
                SqlTask = new Databricks.Inputs.JobTaskSqlTaskArgs
                {
                    WarehouseId = sqlJobWarehouse.Id,
                    Dashboard = new Databricks.Inputs.JobTaskSqlTaskDashboardArgs
                    {
                        DashboardId = dash.Id,
                        Subscriptions = new[]
                        {
                            new Databricks.Inputs.JobTaskSqlTaskDashboardSubscriptionArgs
                            {
                                UserName = "user@domain.com",
                            },
                        },
                    },
                },
            },
            new Databricks.Inputs.JobTaskArgs
            {
                TaskKey = "run_alert",
                SqlTask = new Databricks.Inputs.JobTaskSqlTaskArgs
                {
                    WarehouseId = sqlJobWarehouse.Id,
                    Alert = new Databricks.Inputs.JobTaskSqlTaskAlertArgs
                    {
                        AlertId = alert.Id,
                        Subscriptions = new[]
                        {
                            new Databricks.Inputs.JobTaskSqlTaskAlertSubscriptionArgs
                            {
                                UserName = "user@domain.com",
                            },
                        },
                    },
                },
            },
        },
    });

});
package main

import (
	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := databricks.NewJob(ctx, "sql_aggregation_job", &databricks.JobArgs{
			Name: pulumi.String("Example SQL Job"),
			Tasks: databricks.JobTaskArray{
				&databricks.JobTaskArgs{
					TaskKey: pulumi.String("run_agg_query"),
					SqlTask: &databricks.JobTaskSqlTaskArgs{
						WarehouseId: pulumi.Any(sqlJobWarehouse.Id),
						Query: &databricks.JobTaskSqlTaskQueryArgs{
							QueryId: pulumi.Any(aggQuery.Id),
						},
					},
				},
				&databricks.JobTaskArgs{
					TaskKey: pulumi.String("run_dashboard"),
					SqlTask: &databricks.JobTaskSqlTaskArgs{
						WarehouseId: pulumi.Any(sqlJobWarehouse.Id),
						Dashboard: &databricks.JobTaskSqlTaskDashboardArgs{
							DashboardId: pulumi.Any(dash.Id),
							Subscriptions: databricks.JobTaskSqlTaskDashboardSubscriptionArray{
								&databricks.JobTaskSqlTaskDashboardSubscriptionArgs{
									UserName: pulumi.String("user@domain.com"),
								},
							},
						},
					},
				},
				&databricks.JobTaskArgs{
					TaskKey: pulumi.String("run_alert"),
					SqlTask: &databricks.JobTaskSqlTaskArgs{
						WarehouseId: pulumi.Any(sqlJobWarehouse.Id),
						Alert: &databricks.JobTaskSqlTaskAlertArgs{
							AlertId: pulumi.Any(alert.Id),
							Subscriptions: databricks.JobTaskSqlTaskAlertSubscriptionArray{
								&databricks.JobTaskSqlTaskAlertSubscriptionArgs{
									UserName: pulumi.String("user@domain.com"),
								},
							},
						},
					},
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.Job;
import com.pulumi.databricks.JobArgs;
import com.pulumi.databricks.inputs.JobTaskArgs;
import com.pulumi.databricks.inputs.JobTaskSqlTaskArgs;
import com.pulumi.databricks.inputs.JobTaskSqlTaskQueryArgs;
import com.pulumi.databricks.inputs.JobTaskSqlTaskDashboardArgs;
import com.pulumi.databricks.inputs.JobTaskSqlTaskAlertArgs;
import com.pulumi.databricks.inputs.JobTaskSqlTaskDashboardSubscriptionArgs;
import com.pulumi.databricks.inputs.JobTaskSqlTaskAlertSubscriptionArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var sqlAggregationJob = new Job("sqlAggregationJob", JobArgs.builder()
            .name("Example SQL Job")
            .tasks(            
                JobTaskArgs.builder()
                    .taskKey("run_agg_query")
                    .sqlTask(JobTaskSqlTaskArgs.builder()
                        .warehouseId(sqlJobWarehouse.id())
                        .query(JobTaskSqlTaskQueryArgs.builder()
                            .queryId(aggQuery.id())
                            .build())
                        .build())
                    .build(),
                JobTaskArgs.builder()
                    .taskKey("run_dashboard")
                    .sqlTask(JobTaskSqlTaskArgs.builder()
                        .warehouseId(sqlJobWarehouse.id())
                        .dashboard(JobTaskSqlTaskDashboardArgs.builder()
                            .dashboardId(dash.id())
                            .subscriptions(JobTaskSqlTaskDashboardSubscriptionArgs.builder()
                                .userName("user@domain.com")
                                .build())
                            .build())
                        .build())
                    .build(),
                JobTaskArgs.builder()
                    .taskKey("run_alert")
                    .sqlTask(JobTaskSqlTaskArgs.builder()
                        .warehouseId(sqlJobWarehouse.id())
                        .alert(JobTaskSqlTaskAlertArgs.builder()
                            .alertId(alert.id())
                            .subscriptions(JobTaskSqlTaskAlertSubscriptionArgs.builder()
                                .userName("user@domain.com")
                                .build())
                            .build())
                        .build())
                    .build())
            .build());

    }
}
resources:
  sqlAggregationJob:
    type: databricks:Job
    name: sql_aggregation_job
    properties:
      name: Example SQL Job
      tasks:
        - taskKey: run_agg_query
          sqlTask:
            warehouseId: ${sqlJobWarehouse.id}
            query:
              queryId: ${aggQuery.id}
        - taskKey: run_dashboard
          sqlTask:
            warehouseId: ${sqlJobWarehouse.id}
            dashboard:
              dashboardId: ${dash.id}
              subscriptions:
                - userName: user@domain.com
        - taskKey: run_alert
          sqlTask:
            warehouseId: ${sqlJobWarehouse.id}
            alert:
              alertId: ${alert.id}
              subscriptions:
                - userName: user@domain.com
source String
The source of the project. Possible values are WORKSPACE and GIT.
path This property is required. string

If source is GIT: Relative path to the file in the repository specified in the git_source block with SQL commands to execute. If source is WORKSPACE: Absolute path to the file in the workspace with SQL commands to execute.

source string
The source of the project. Possible values are WORKSPACE and GIT.
path This property is required. str

If source is GIT: Relative path to the file in the repository specified in the git_source block with SQL commands to execute. If source is WORKSPACE: Absolute path to the file in the workspace with SQL commands to execute.

source str
The source of the project. Possible values are WORKSPACE and GIT.
path This property is required. String

If source is GIT: Relative path to the file in the repository specified in the git_source block with SQL commands to execute. If source is WORKSPACE: Absolute path to the file in the workspace with SQL commands to execute.

source String
The source of the project. Possible values are WORKSPACE and GIT.
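
As a minimal sketch of the path/source pair described above (assuming an existing SQL warehouse resource named sqlJobWarehouse defined elsewhere, and an illustrative workspace path), a sql_task can point at a file of SQL commands instead of a saved query:

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

// Hypothetical job running SQL commands stored in a workspace file.
// `sqlJobWarehouse` is assumed to be a databricks.SqlEndpoint defined elsewhere.
const sqlFileJob = new databricks.Job("sql_file_job", {
    name: "Example SQL file task",
    tasks: [{
        taskKey: "run_sql_file",
        sqlTask: {
            warehouseId: sqlJobWarehouse.id,
            file: {
                // Absolute workspace path because source is WORKSPACE;
                // with source: "GIT" this would be a path relative to git_source.
                path: "/Shared/queries/aggregate.sql",
                source: "WORKSPACE",
            },
        },
    }],
});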

JobTaskForEachTaskTaskSqlTaskQuery
, JobTaskForEachTaskTaskSqlTaskQueryArgs

QueryId This property is required. string
QueryId This property is required. string
queryId This property is required. String
queryId This property is required. string
query_id This property is required. str
queryId This property is required. String

JobTaskForEachTaskTaskWebhookNotifications
, JobTaskForEachTaskTaskWebhookNotificationsArgs

OnDurationWarningThresholdExceededs List<JobTaskForEachTaskTaskWebhookNotificationsOnDurationWarningThresholdExceeded>

(List) list of notification IDs to call when the duration of a run exceeds the threshold specified by the RUN_DURATION_SECONDS metric in the health block.

Note that the id is not to be confused with the name of the alert destination. The id can be retrieved through the API or the URL of Databricks UI https://<workspace host>/sql/destinations/<notification id>?o=<workspace id>

OnFailures List<JobTaskForEachTaskTaskWebhookNotificationsOnFailure>
(List) list of notification IDs to call when the run fails. A maximum of 3 destinations can be specified.
OnStarts List<JobTaskForEachTaskTaskWebhookNotificationsOnStart>
(List) list of notification IDs to call when the run starts. A maximum of 3 destinations can be specified.
OnStreamingBacklogExceededs List<JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded>
OnSuccesses List<JobTaskForEachTaskTaskWebhookNotificationsOnSuccess>
(List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified.
OnDurationWarningThresholdExceededs []JobTaskForEachTaskTaskWebhookNotificationsOnDurationWarningThresholdExceeded

(List) list of notification IDs to call when the duration of a run exceeds the threshold specified by the RUN_DURATION_SECONDS metric in the health block.

Note that the id is not to be confused with the name of the alert destination. The id can be retrieved through the API or the URL of Databricks UI https://<workspace host>/sql/destinations/<notification id>?o=<workspace id>

OnFailures []JobTaskForEachTaskTaskWebhookNotificationsOnFailure
(List) list of notification IDs to call when the run fails. A maximum of 3 destinations can be specified.
OnStarts []JobTaskForEachTaskTaskWebhookNotificationsOnStart
(List) list of notification IDs to call when the run starts. A maximum of 3 destinations can be specified.
OnStreamingBacklogExceededs []JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded
OnSuccesses []JobTaskForEachTaskTaskWebhookNotificationsOnSuccess
(List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified.
onDurationWarningThresholdExceededs List<JobTaskForEachTaskTaskWebhookNotificationsOnDurationWarningThresholdExceeded>

(List) list of notification IDs to call when the duration of a run exceeds the threshold specified by the RUN_DURATION_SECONDS metric in the health block.

Note that the id is not to be confused with the name of the alert destination. The id can be retrieved through the API or the URL of Databricks UI https://<workspace host>/sql/destinations/<notification id>?o=<workspace id>

onFailures List<JobTaskForEachTaskTaskWebhookNotificationsOnFailure>
(List) list of notification IDs to call when the run fails. A maximum of 3 destinations can be specified.
onStarts List<JobTaskForEachTaskTaskWebhookNotificationsOnStart>
(List) list of notification IDs to call when the run starts. A maximum of 3 destinations can be specified.
onStreamingBacklogExceededs List<JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded>
onSuccesses List<JobTaskForEachTaskTaskWebhookNotificationsOnSuccess>
(List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified.
onDurationWarningThresholdExceededs JobTaskForEachTaskTaskWebhookNotificationsOnDurationWarningThresholdExceeded[]

(List) list of notification IDs to call when the duration of a run exceeds the threshold specified by the RUN_DURATION_SECONDS metric in the health block.

Note that the id is not to be confused with the name of the alert destination. The id can be retrieved through the API or the URL of Databricks UI https://<workspace host>/sql/destinations/<notification id>?o=<workspace id>

onFailures JobTaskForEachTaskTaskWebhookNotificationsOnFailure[]
(List) list of notification IDs to call when the run fails. A maximum of 3 destinations can be specified.
onStarts JobTaskForEachTaskTaskWebhookNotificationsOnStart[]
(List) list of notification IDs to call when the run starts. A maximum of 3 destinations can be specified.
onStreamingBacklogExceededs JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded[]
onSuccesses JobTaskForEachTaskTaskWebhookNotificationsOnSuccess[]
(List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified.
on_duration_warning_threshold_exceededs Sequence[JobTaskForEachTaskTaskWebhookNotificationsOnDurationWarningThresholdExceeded]

(List) list of notification IDs to call when the duration of a run exceeds the threshold specified by the RUN_DURATION_SECONDS metric in the health block.

Note that the id is not to be confused with the name of the alert destination. The id can be retrieved through the API or the URL of Databricks UI https://<workspace host>/sql/destinations/<notification id>?o=<workspace id>

on_failures Sequence[JobTaskForEachTaskTaskWebhookNotificationsOnFailure]
(List) list of notification IDs to call when the run fails. A maximum of 3 destinations can be specified.
on_starts Sequence[JobTaskForEachTaskTaskWebhookNotificationsOnStart]
(List) list of notification IDs to call when the run starts. A maximum of 3 destinations can be specified.
on_streaming_backlog_exceededs Sequence[JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded]
on_successes Sequence[JobTaskForEachTaskTaskWebhookNotificationsOnSuccess]
(List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified.
onDurationWarningThresholdExceededs List<Property Map>

(List) list of notification IDs to call when the duration of a run exceeds the threshold specified by the RUN_DURATION_SECONDS metric in the health block.

Note that the id is not to be confused with the name of the alert destination. The id can be retrieved through the API or the URL of Databricks UI https://<workspace host>/sql/destinations/<notification id>?o=<workspace id>

onFailures List<Property Map>
(List) list of notification IDs to call when the run fails. A maximum of 3 destinations can be specified.
onStarts List<Property Map>
(List) list of notification IDs to call when the run starts. A maximum of 3 destinations can be specified.
onStreamingBacklogExceededs List<Property Map>
onSuccesses List<Property Map>
(List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified.
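
For illustration, a hedged sketch of wiring these notification lists up on a task; the destination ID shown is a placeholder, retrieved from the API or the notification destination URL as noted above:

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

// Placeholder notification destination ID; see the note above on how to find it.
const notificationDestinationId = "00000000-0000-0000-0000-000000000000";

const notifiedJob = new databricks.Job("notified_job", {
    name: "Job with webhook notifications",
    tasks: [{
        taskKey: "main",
        notebookTask: {
            notebookPath: "/Shared/notebooks/main",
        },
        webhookNotifications: {
            // Called when the task run starts and when it fails.
            onStarts: [{ id: notificationDestinationId }],
            onFailures: [{ id: notificationDestinationId }],
        },
    }],
});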

JobTaskForEachTaskTaskWebhookNotificationsOnDurationWarningThresholdExceeded
, JobTaskForEachTaskTaskWebhookNotificationsOnDurationWarningThresholdExceededArgs

Id This property is required. string
ID of the notification destination
Id This property is required. string
ID of the notification destination
id This property is required. String
ID of the notification destination
id This property is required. string
ID of the notification destination
id This property is required. str
ID of the notification destination
id This property is required. String
ID of the notification destination

JobTaskForEachTaskTaskWebhookNotificationsOnFailure
, JobTaskForEachTaskTaskWebhookNotificationsOnFailureArgs

Id This property is required. string
ID of the notification destination
Id This property is required. string
ID of the notification destination
id This property is required. String
ID of the notification destination
id This property is required. string
ID of the notification destination
id This property is required. str
ID of the notification destination
id This property is required. String
ID of the notification destination

JobTaskForEachTaskTaskWebhookNotificationsOnStart
, JobTaskForEachTaskTaskWebhookNotificationsOnStartArgs

Id This property is required. string
ID of the notification destination
Id This property is required. string
ID of the notification destination
id This property is required. String
ID of the notification destination
id This property is required. string
ID of the notification destination
id This property is required. str
ID of the notification destination
id This property is required. String
ID of the notification destination

JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded
, JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs

Id This property is required. string
ID of the notification destination
Id This property is required. string
ID of the notification destination
id This property is required. String
ID of the notification destination
id This property is required. string
ID of the notification destination
id This property is required. str
ID of the notification destination
id This property is required. String
ID of the notification destination

JobTaskForEachTaskTaskWebhookNotificationsOnSuccess
, JobTaskForEachTaskTaskWebhookNotificationsOnSuccessArgs

Id This property is required. string
ID of the notification destination
Id This property is required. string
ID of the notification destination
id This property is required. String
ID of the notification destination
id This property is required. string
ID of the notification destination
id This property is required. str
ID of the notification destination
id This property is required. String
ID of the notification destination

JobTaskGenAiComputeTask
, JobTaskGenAiComputeTaskArgs

JobTaskGenAiComputeTaskCompute
, JobTaskGenAiComputeTaskComputeArgs

GpuNodePoolId This property is required. string
NumGpus This property is required. int
GpuType string
GpuNodePoolId This property is required. string
NumGpus This property is required. int
GpuType string
gpuNodePoolId This property is required. String
numGpus This property is required. Integer
gpuType String
gpuNodePoolId This property is required. string
numGpus This property is required. number
gpuType string
gpu_node_pool_id This property is required. str
num_gpus This property is required. int
gpu_type str
gpuNodePoolId This property is required. String
numGpus This property is required. Number
gpuType String

JobTaskHealth
, JobTaskHealthArgs

Rules This property is required. List<JobTaskHealthRule>
list of rules that are represented as objects with the following attributes:
Rules This property is required. []JobTaskHealthRule
list of rules that are represented as objects with the following attributes:
rules This property is required. List<JobTaskHealthRule>
list of rules that are represented as objects with the following attributes:
rules This property is required. JobTaskHealthRule[]
list of rules that are represented as objects with the following attributes:
rules This property is required. Sequence[JobTaskHealthRule]
list of rules that are represented as objects with the following attributes:
rules This property is required. List<Property Map>
list of rules that are represented as objects with the following attributes:

JobTaskHealthRule
, JobTaskHealthRuleArgs

Metric This property is required. string
string specifying the metric to check. The only supported metric is RUN_DURATION_SECONDS (check Jobs REST API documentation for the latest information).
Op This property is required. string
string specifying the operation used to evaluate the given metric. The only supported operation is GREATER_THAN.
Value This property is required. int
integer value used to compare to the given metric.
Metric This property is required. string
string specifying the metric to check. The only supported metric is RUN_DURATION_SECONDS (check Jobs REST API documentation for the latest information).
Op This property is required. string
string specifying the operation used to evaluate the given metric. The only supported operation is GREATER_THAN.
Value This property is required. int
integer value used to compare to the given metric.
metric This property is required. String
string specifying the metric to check. The only supported metric is RUN_DURATION_SECONDS (check Jobs REST API documentation for the latest information).
op This property is required. String
string specifying the operation used to evaluate the given metric. The only supported operation is GREATER_THAN.
value This property is required. Integer
integer value used to compare to the given metric.
metric This property is required. string
string specifying the metric to check. The only supported metric is RUN_DURATION_SECONDS (check Jobs REST API documentation for the latest information).
op This property is required. string
string specifying the operation used to evaluate the given metric. The only supported operation is GREATER_THAN.
value This property is required. number
integer value used to compare to the given metric.
metric This property is required. str
string specifying the metric to check. The only supported metric is RUN_DURATION_SECONDS (check Jobs REST API documentation for the latest information).
op This property is required. str
string specifying the operation used to evaluate the given metric. The only supported operation is GREATER_THAN.
value This property is required. int
integer value used to compare to the given metric.
metric This property is required. String
string specifying the metric to check. The only supported metric is RUN_DURATION_SECONDS (check Jobs REST API documentation for the latest information).
op This property is required. String
string specifying the operation used to evaluate the given metric. The only supported operation is GREATER_THAN.
value This property is required. Number
integer value used to compare to the given metric.
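
A minimal sketch of a health block using these attributes (the notebook path is illustrative); runs longer than one hour trigger the duration-warning notifications:

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

const monitoredJob = new databricks.Job("monitored_job", {
    name: "Job with a run-duration health rule",
    tasks: [{
        taskKey: "long_running",
        notebookTask: {
            notebookPath: "/Shared/notebooks/etl",
        },
        // RUN_DURATION_SECONDS is currently the only supported metric,
        // and GREATER_THAN the only supported operation.
        health: {
            rules: [{
                metric: "RUN_DURATION_SECONDS",
                op: "GREATER_THAN",
                value: 3600,
            }],
        },
    }],
});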

JobTaskLibrary
, JobTaskLibraryArgs

JobTaskLibraryCran
, JobTaskLibraryCranArgs

Package This property is required. string
Repo string
Package This property is required. string
Repo string
package_ This property is required. String
repo String
package This property is required. string
repo string
package This property is required. str
repo str
package This property is required. String
repo String

JobTaskLibraryMaven
, JobTaskLibraryMavenArgs

Coordinates This property is required. string
Exclusions List<string>
Repo string
Coordinates This property is required. string
Exclusions []string
Repo string
coordinates This property is required. String
exclusions List<String>
repo String
coordinates This property is required. string
exclusions string[]
repo string
coordinates This property is required. str
exclusions Sequence[str]
repo str
coordinates This property is required. String
exclusions List<String>
repo String

JobTaskLibraryPypi
, JobTaskLibraryPypiArgs

Package This property is required. string
Repo string
Package This property is required. string
Repo string
package_ This property is required. String
repo String
package This property is required. string
repo string
package This property is required. str
repo str
package This property is required. String
repo String
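
As a sketch of attaching task-level libraries (the cluster reference, script path, and package coordinates are illustrative placeholders):

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

// `sharedCluster` is assumed to be a databricks.Cluster defined elsewhere.
const libraryJob = new databricks.Job("library_job", {
    name: "Job with task libraries",
    tasks: [{
        taskKey: "with_libs",
        existingClusterId: sharedCluster.id,
        sparkPythonTask: {
            pythonFile: "dbfs:/scripts/main.py",
        },
        libraries: [
            { pypi: { package: "faker" } },
            { maven: { coordinates: "org.jsoup:jsoup:1.18.1" } },
        ],
    }],
});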

JobTaskNewCluster
, JobTaskNewClusterArgs

SparkVersion This property is required. string
ApplyPolicyDefaultValues bool
Autoscale JobTaskNewClusterAutoscale
AwsAttributes JobTaskNewClusterAwsAttributes
AzureAttributes JobTaskNewClusterAzureAttributes
ClusterId string
ClusterLogConf JobTaskNewClusterClusterLogConf
ClusterMountInfos List<JobTaskNewClusterClusterMountInfo>
ClusterName string
CustomTags Dictionary<string, string>
DataSecurityMode string
DockerImage JobTaskNewClusterDockerImage
DriverInstancePoolId string
DriverNodeTypeId string
EnableElasticDisk bool
EnableLocalDiskEncryption bool
GcpAttributes JobTaskNewClusterGcpAttributes
IdempotencyToken Changes to this property will trigger replacement. string
InitScripts List<JobTaskNewClusterInitScript>
InstancePoolId string
IsSingleNode bool
Kind string
Libraries List<JobTaskNewClusterLibrary>
(List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
NodeTypeId string
NumWorkers int
PolicyId string
RuntimeEngine string
SingleUserName string
SparkConf Dictionary<string, string>
SparkEnvVars Dictionary<string, string>
SshPublicKeys List<string>
UseMlRuntime bool
WorkloadType JobTaskNewClusterWorkloadType
isn't supported
SparkVersion This property is required. string
ApplyPolicyDefaultValues bool
Autoscale JobTaskNewClusterAutoscale
AwsAttributes JobTaskNewClusterAwsAttributes
AzureAttributes JobTaskNewClusterAzureAttributes
ClusterId string
ClusterLogConf JobTaskNewClusterClusterLogConf
ClusterMountInfos []JobTaskNewClusterClusterMountInfo
ClusterName string
CustomTags map[string]string
DataSecurityMode string
DockerImage JobTaskNewClusterDockerImage
DriverInstancePoolId string
DriverNodeTypeId string
EnableElasticDisk bool
EnableLocalDiskEncryption bool
GcpAttributes JobTaskNewClusterGcpAttributes
IdempotencyToken Changes to this property will trigger replacement. string
InitScripts []JobTaskNewClusterInitScript
InstancePoolId string
IsSingleNode bool
Kind string
Libraries []JobTaskNewClusterLibrary
(List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
NodeTypeId string
NumWorkers int
PolicyId string
RuntimeEngine string
SingleUserName string
SparkConf map[string]string
SparkEnvVars map[string]string
SshPublicKeys []string
UseMlRuntime bool
WorkloadType JobTaskNewClusterWorkloadType
isn't supported
sparkVersion This property is required. String
applyPolicyDefaultValues Boolean
autoscale JobTaskNewClusterAutoscale
awsAttributes JobTaskNewClusterAwsAttributes
azureAttributes JobTaskNewClusterAzureAttributes
clusterId String
clusterLogConf JobTaskNewClusterClusterLogConf
clusterMountInfos List<JobTaskNewClusterClusterMountInfo>
clusterName String
customTags Map<String,String>
dataSecurityMode String
dockerImage JobTaskNewClusterDockerImage
driverInstancePoolId String
driverNodeTypeId String
enableElasticDisk Boolean
enableLocalDiskEncryption Boolean
gcpAttributes JobTaskNewClusterGcpAttributes
idempotencyToken Changes to this property will trigger replacement. String
initScripts List<JobTaskNewClusterInitScript>
instancePoolId String
isSingleNode Boolean
kind String
libraries List<JobTaskNewClusterLibrary>
(List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
nodeTypeId String
numWorkers Integer
policyId String
runtimeEngine String
singleUserName String
sparkConf Map<String,String>
sparkEnvVars Map<String,String>
sshPublicKeys List<String>
useMlRuntime Boolean
workloadType JobTaskNewClusterWorkloadType
isn't supported
spark_version This property is required. str
apply_policy_default_values bool
autoscale JobTaskNewClusterAutoscale
aws_attributes JobTaskNewClusterAwsAttributes
azure_attributes JobTaskNewClusterAzureAttributes
cluster_id str
cluster_log_conf JobTaskNewClusterClusterLogConf
cluster_mount_infos Sequence[JobTaskNewClusterClusterMountInfo]
cluster_name str
custom_tags Mapping[str, str]
data_security_mode str
docker_image JobTaskNewClusterDockerImage
driver_instance_pool_id str
driver_node_type_id str
enable_elastic_disk bool
enable_local_disk_encryption bool
gcp_attributes JobTaskNewClusterGcpAttributes
idempotency_token Changes to this property will trigger replacement. str
init_scripts Sequence[JobTaskNewClusterInitScript]
instance_pool_id str
is_single_node bool
kind str
libraries Sequence[JobTaskNewClusterLibrary]
(List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
node_type_id str
num_workers int
policy_id str
runtime_engine str
single_user_name str
spark_conf Mapping[str, str]
spark_env_vars Mapping[str, str]
ssh_public_keys Sequence[str]
use_ml_runtime bool
workload_type JobTaskNewClusterWorkloadType
isn't supported
sparkVersion This property is required. String
applyPolicyDefaultValues Boolean
autoscale Property Map
awsAttributes Property Map
azureAttributes Property Map
clusterId String
clusterLogConf Property Map
clusterMountInfos List<Property Map>
clusterName String
customTags Map<String>
dataSecurityMode String
dockerImage Property Map
driverInstancePoolId String
driverNodeTypeId String
enableElasticDisk Boolean
enableLocalDiskEncryption Boolean
gcpAttributes Property Map
idempotencyToken Changes to this property will trigger replacement. String
initScripts List<Property Map>
instancePoolId String
isSingleNode Boolean
kind String
libraries List<Property Map>
(List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
nodeTypeId String
numWorkers Number
policyId String
runtimeEngine String
singleUserName String
sparkConf Map<String>
sparkEnvVars Map<String>
sshPublicKeys List<String>
useMlRuntime Boolean
workloadType Property Map
isn't supported

JobTaskNewClusterAutoscale
, JobTaskNewClusterAutoscaleArgs

maxWorkers Integer
minWorkers Integer
maxWorkers number
minWorkers number
maxWorkers Number
minWorkers Number
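
For example, a new job cluster can autoscale between bounds instead of using a fixed numWorkers; the latest and smallest references below are assumed to come from the spark-version and node-type data sources used elsewhere on this page:

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

const autoscalingJob = new databricks.Job("autoscaling_job", {
    name: "Job on an autoscaling cluster",
    tasks: [{
        taskKey: "scale_out",
        newCluster: {
            sparkVersion: latest.id,   // assumed databricks.getSparkVersion() result
            nodeTypeId: smallest.id,   // assumed databricks.getNodeType() result
            autoscale: {
                minWorkers: 1,
                maxWorkers: 8,
            },
        },
        notebookTask: {
            notebookPath: "/Shared/notebooks/scale",
        },
    }],
});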

JobTaskNewClusterAwsAttributes
, JobTaskNewClusterAwsAttributesArgs

JobTaskNewClusterAzureAttributes
, JobTaskNewClusterAzureAttributesArgs

JobTaskNewClusterAzureAttributesLogAnalyticsInfo
, JobTaskNewClusterAzureAttributesLogAnalyticsInfoArgs

JobTaskNewClusterClusterLogConf
, JobTaskNewClusterClusterLogConfArgs

JobTaskNewClusterClusterLogConfDbfs
, JobTaskNewClusterClusterLogConfDbfsArgs

Destination This property is required. string
Destination This property is required. string
destination This property is required. String
destination This property is required. string
destination This property is required. str
destination This property is required. String

JobTaskNewClusterClusterLogConfS3
, JobTaskNewClusterClusterLogConfS3Args

Destination This property is required. string
CannedAcl string
EnableEncryption bool
EncryptionType string
Endpoint string
KmsKey string
Region string
Destination This property is required. string
CannedAcl string
EnableEncryption bool
EncryptionType string
Endpoint string
KmsKey string
Region string
destination This property is required. String
cannedAcl String
enableEncryption Boolean
encryptionType String
endpoint String
kmsKey String
region String
destination This property is required. string
cannedAcl string
enableEncryption boolean
encryptionType string
endpoint string
kmsKey string
region string
destination This property is required. str
canned_acl str
enable_encryption bool
encryption_type str
endpoint str
kms_key str
region str
destination This property is required. String
cannedAcl String
enableEncryption Boolean
encryptionType String
endpoint String
kmsKey String
region String
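
A hedged sketch of delivering job-cluster logs to S3 using the fields above (bucket, region, and ACL values are placeholders):

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

const loggedJob = new databricks.Job("logged_job", {
    name: "Job with S3 cluster log delivery",
    tasks: [{
        taskKey: "main",
        newCluster: {
            sparkVersion: latest.id,   // assumed data-source lookups, as above
            nodeTypeId: smallest.id,
            numWorkers: 2,
            clusterLogConf: {
                s3: {
                    destination: "s3://my-log-bucket/job-cluster-logs",
                    region: "us-east-1",
                    enableEncryption: true,
                    cannedAcl: "bucket-owner-full-control",
                },
            },
        },
        notebookTask: {
            notebookPath: "/Shared/notebooks/ingest",
        },
    }],
});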

JobTaskNewClusterClusterLogConfVolumes
, JobTaskNewClusterClusterLogConfVolumesArgs

Destination This property is required. string
Destination This property is required. string
destination This property is required. String
destination This property is required. string
destination This property is required. str
destination This property is required. String

JobTaskNewClusterClusterMountInfo
, JobTaskNewClusterClusterMountInfoArgs

localMountDirPath This property is required. String
networkFilesystemInfo This property is required. Property Map
remoteMountDirPath String

JobTaskNewClusterClusterMountInfoNetworkFilesystemInfo
, JobTaskNewClusterClusterMountInfoNetworkFilesystemInfoArgs

ServerAddress This property is required. string
MountOptions string
ServerAddress This property is required. string
MountOptions string
serverAddress This property is required. String
mountOptions String
serverAddress This property is required. string
mountOptions string
server_address This property is required. str
mount_options str
serverAddress This property is required. String
mountOptions String

JobTaskNewClusterDockerImage
, JobTaskNewClusterDockerImageArgs

Url This property is required. string
URL of the Docker image in the registry
BasicAuth JobTaskNewClusterDockerImageBasicAuth
Url This property is required. string
URL of the Docker image in the registry
BasicAuth JobTaskNewClusterDockerImageBasicAuth
url This property is required. String
URL of the Docker image in the registry
basicAuth JobTaskNewClusterDockerImageBasicAuth
url This property is required. string
URL of the Docker image in the registry
basicAuth JobTaskNewClusterDockerImageBasicAuth
url This property is required. str
URL of the Docker image in the registry
basic_auth JobTaskNewClusterDockerImageBasicAuth
url This property is required. String
URL of the Docker image in the registry
basicAuth Property Map

JobTaskNewClusterDockerImageBasicAuth
, JobTaskNewClusterDockerImageBasicAuthArgs

Password This property is required. string
Username This property is required. string
Password This property is required. string
Username This property is required. string
password This property is required. String
username This property is required. String
password This property is required. string
username This property is required. string
password This property is required. str
username This property is required. str
password This property is required. String
username This property is required. String
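
A hedged sketch of running the job cluster from a custom Docker image, with registry credentials read from Pulumi config (the image URL and config keys are placeholders):

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

const cfg = new pulumi.Config();

const containerJob = new databricks.Job("container_job", {
    name: "Job on a custom container image",
    tasks: [{
        taskKey: "main",
        newCluster: {
            sparkVersion: latest.id,   // assumed data-source lookups, as above
            nodeTypeId: smallest.id,
            numWorkers: 1,
            dockerImage: {
                url: "my-registry.example.com/jobs/runtime:1.0",
                basicAuth: {
                    username: cfg.require("registryUser"),
                    password: cfg.requireSecret("registryPassword"),
                },
            },
        },
        notebookTask: {
            notebookPath: "/Shared/notebooks/main",
        },
    }],
});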

JobTaskNewClusterGcpAttributes
, JobTaskNewClusterGcpAttributesArgs

JobTaskNewClusterInitScript
, JobTaskNewClusterInitScriptArgs

abfss Property Map
dbfs Property Map

Deprecated: For init scripts use 'volumes', 'workspace' or cloud storage location instead of 'dbfs'.

file Property Map
block consisting of single string fields:
gcs Property Map
s3 Property Map
volumes Property Map
workspace Property Map
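
For example (paths are placeholders), init scripts stored in the workspace or on a Unity Catalog volume can be attached to the job cluster; the dbfs location is deprecated, as noted above:

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

const initScriptJob = new databricks.Job("init_script_job", {
    name: "Job cluster with init scripts",
    tasks: [{
        taskKey: "main",
        newCluster: {
            sparkVersion: latest.id,   // assumed data-source lookups, as above
            nodeTypeId: smallest.id,
            numWorkers: 1,
            initScripts: [
                { workspace: { destination: "/Shared/init/install-deps.sh" } },
                { volumes: { destination: "/Volumes/main/default/init/extra-config.sh" } },
            ],
        },
        notebookTask: {
            notebookPath: "/Shared/notebooks/main",
        },
    }],
});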

JobTaskNewClusterInitScriptAbfss
, JobTaskNewClusterInitScriptAbfssArgs

Destination This property is required. string
Destination This property is required. string
destination This property is required. String
destination This property is required. string
destination This property is required. str
destination This property is required. String

JobTaskNewClusterInitScriptDbfs
, JobTaskNewClusterInitScriptDbfsArgs

Destination This property is required. string
Destination This property is required. string
destination This property is required. String
destination This property is required. string
destination This property is required. str
destination This property is required. String

JobTaskNewClusterInitScriptFile
, JobTaskNewClusterInitScriptFileArgs

Destination This property is required. string
Destination This property is required. string
destination This property is required. String
destination This property is required. string
destination This property is required. str
destination This property is required. String

JobTaskNewClusterInitScriptGcs
, JobTaskNewClusterInitScriptGcsArgs

Destination This property is required. string
Destination This property is required. string
destination This property is required. String
destination This property is required. string
destination This property is required. str
destination This property is required. String

JobTaskNewClusterInitScriptS3
, JobTaskNewClusterInitScriptS3Args

Destination This property is required. string
CannedAcl string
EnableEncryption bool
EncryptionType string
Endpoint string
KmsKey string
Region string
Destination This property is required. string
CannedAcl string
EnableEncryption bool
EncryptionType string
Endpoint string
KmsKey string
Region string
destination This property is required. String
cannedAcl String
enableEncryption Boolean
encryptionType String
endpoint String
kmsKey String
region String
destination This property is required. string
cannedAcl string
enableEncryption boolean
encryptionType string
endpoint string
kmsKey string
region string
destination This property is required. str
canned_acl str
enable_encryption bool
encryption_type str
endpoint str
kms_key str
region str
destination This property is required. String
cannedAcl String
enableEncryption Boolean
encryptionType String
endpoint String
kmsKey String
region String

JobTaskNewClusterInitScriptVolumes
, JobTaskNewClusterInitScriptVolumesArgs

Destination This property is required. string
Destination This property is required. string
destination This property is required. String
destination This property is required. string
destination This property is required. str
destination This property is required. String

JobTaskNewClusterInitScriptWorkspace
, JobTaskNewClusterInitScriptWorkspaceArgs

Destination This property is required. string
Destination This property is required. string
destination This property is required. String
destination This property is required. string
destination This property is required. str
destination This property is required. String

JobTaskNewClusterLibrary
, JobTaskNewClusterLibraryArgs

JobTaskNewClusterLibraryCran
, JobTaskNewClusterLibraryCranArgs

Package This property is required. string
Repo string
Package This property is required. string
Repo string
package_ This property is required. String
repo String
package This property is required. string
repo string
package This property is required. str
repo str
package This property is required. String
repo String

JobTaskNewClusterLibraryMaven
, JobTaskNewClusterLibraryMavenArgs

Coordinates This property is required. string
Exclusions List<string>
Repo string
Coordinates This property is required. string
Exclusions []string
Repo string
coordinates This property is required. String
exclusions List<String>
repo String
coordinates This property is required. string
exclusions string[]
repo string
coordinates This property is required. str
exclusions Sequence[str]
repo str
coordinates This property is required. String
exclusions List<String>
repo String

JobTaskNewClusterLibraryPypi
, JobTaskNewClusterLibraryPypiArgs

Package This property is required. string
Repo string
Package This property is required. string
Repo string
package_ This property is required. String
repo String
package This property is required. string
repo string
package This property is required. str
repo str
package This property is required. String
repo String
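
For illustration only: the cran/maven/pypi blocks above share their shape with the task-level libraries argument, and the sketch below assumes that task-level attachment; the cluster ID, package version, and Maven coordinates are placeholders.

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

// Hypothetical task that installs a PyPI package and a Maven artifact.
const libraryJob = new databricks.Job("library_job", {
    name: "Job with extra libraries",
    tasks: [{
        taskKey: "score",
        existingClusterId: "1234-567890-abcde123", // placeholder cluster ID
        libraries: [
            { pypi: { package: "scikit-learn==1.4.2" } }, // installed from the default PyPI index when repo is omitted
            { maven: { coordinates: "com.fasterxml.jackson.core:jackson-databind:2.17.0" } }, // placeholder coordinates
        ],
        notebookTask: {
            notebookPath: "/Shared/ml/score", // placeholder notebook path
        },
    }],
});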

JobTaskNewClusterWorkloadType
, JobTaskNewClusterWorkloadTypeArgs

clients This property is required. Property Map

JobTaskNewClusterWorkloadTypeClients
, JobTaskNewClusterWorkloadTypeClientsArgs

Jobs bool
Notebooks bool
Jobs bool
Notebooks bool
jobs Boolean
notebooks Boolean
jobs boolean
notebooks boolean
jobs bool
notebooks bool
jobs Boolean
notebooks Boolean

JobTaskNotebookTask
, JobTaskNotebookTaskArgs

NotebookPath This property is required. string
The path of the databricks.Notebook to be run in the Databricks workspace or remote repository. For notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash. For notebooks stored in a remote repository, the path must be relative. This field is required.
BaseParameters Dictionary<string, string>
(Map) Base parameters to be used for each run of this job. If the run is initiated by a call to run-now with parameters specified, the two parameters maps will be merged. If the same key is specified in base_parameters and in run-now, the value from run-now will be used. If the notebook takes a parameter that is not specified in the job’s base_parameters or the run-now override parameters, the default value from the notebook will be used. Retrieve these parameters in a notebook using dbutils.widgets.get.
Source string
Location type of the notebook, can only be WORKSPACE or GIT. When set to WORKSPACE, the notebook will be retrieved from the local Databricks workspace. When set to GIT, the notebook will be retrieved from a Git repository defined in git_source. If the value is empty, the task will use GIT if git_source is defined and WORKSPACE otherwise.
WarehouseId string
ID of the SQL warehouse (databricks_sql_endpoint) that will be used to execute the task with a SQL notebook.
NotebookPath This property is required. string
The path of the databricks.Notebook to be run in the Databricks workspace or remote repository. For notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash. For notebooks stored in a remote repository, the path must be relative. This field is required.
BaseParameters map[string]string
(Map) Base parameters to be used for each run of this job. If the run is initiated by a call to run-now with parameters specified, the two parameters maps will be merged. If the same key is specified in base_parameters and in run-now, the value from run-now will be used. If the notebook takes a parameter that is not specified in the job’s base_parameters or the run-now override parameters, the default value from the notebook will be used. Retrieve these parameters in a notebook using dbutils.widgets.get.
Source string
Location type of the notebook, can only be WORKSPACE or GIT. When set to WORKSPACE, the notebook will be retrieved from the local Databricks workspace. When set to GIT, the notebook will be retrieved from a Git repository defined in git_source. If the value is empty, the task will use GIT if git_source is defined and WORKSPACE otherwise.
WarehouseId string
ID of the SQL warehouse (databricks_sql_endpoint) that will be used to execute the task with a SQL notebook.
notebookPath This property is required. String
The path of the databricks.Notebook to be run in the Databricks workspace or remote repository. For notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash. For notebooks stored in a remote repository, the path must be relative. This field is required.
baseParameters Map<String,String>
(Map) Base parameters to be used for each run of this job. If the run is initiated by a call to run-now with parameters specified, the two parameters maps will be merged. If the same key is specified in base_parameters and in run-now, the value from run-now will be used. If the notebook takes a parameter that is not specified in the job’s base_parameters or the run-now override parameters, the default value from the notebook will be used. Retrieve these parameters in a notebook using dbutils.widgets.get.
source String
Location type of the notebook, can only be WORKSPACE or GIT. When set to WORKSPACE, the notebook will be retrieved from the local Databricks workspace. When set to GIT, the notebook will be retrieved from a Git repository defined in git_source. If the value is empty, the task will use GIT if git_source is defined and WORKSPACE otherwise.
warehouseId String
ID of the SQL warehouse (databricks_sql_endpoint) that will be used to execute the task with a SQL notebook.
notebookPath This property is required. string
The path of the databricks.Notebook to be run in the Databricks workspace or remote repository. For notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash. For notebooks stored in a remote repository, the path must be relative. This field is required.
baseParameters {[key: string]: string}
(Map) Base parameters to be used for each run of this job. If the run is initiated by a call to run-now with parameters specified, the two parameters maps will be merged. If the same key is specified in base_parameters and in run-now, the value from run-now will be used. If the notebook takes a parameter that is not specified in the job’s base_parameters or the run-now override parameters, the default value from the notebook will be used. Retrieve these parameters in a notebook using dbutils.widgets.get.
source string
Location type of the notebook, can only be WORKSPACE or GIT. When set to WORKSPACE, the notebook will be retrieved from the local Databricks workspace. When set to GIT, the notebook will be retrieved from a Git repository defined in git_source. If the value is empty, the task will use GIT if git_source is defined and WORKSPACE otherwise.
warehouseId string
ID of the SQL warehouse (databricks_sql_endpoint) that will be used to execute the task with a SQL notebook.
notebook_path This property is required. str
The path of the databricks.Notebook to be run in the Databricks workspace or remote repository. For notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash. For notebooks stored in a remote repository, the path must be relative. This field is required.
base_parameters Mapping[str, str]
(Map) Base parameters to be used for each run of this job. If the run is initiated by a call to run-now with parameters specified, the two parameters maps will be merged. If the same key is specified in base_parameters and in run-now, the value from run-now will be used. If the notebook takes a parameter that is not specified in the job’s base_parameters or the run-now override parameters, the default value from the notebook will be used. Retrieve these parameters in a notebook using dbutils.widgets.get.
source str
Location type of the notebook, can only be WORKSPACE or GIT. When set to WORKSPACE, the notebook will be retrieved from the local Databricks workspace. When set to GIT, the notebook will be retrieved from a Git repository defined in git_source. If the value is empty, the task will use GIT if git_source is defined and WORKSPACE otherwise.
warehouse_id str
ID of the SQL warehouse (databricks_sql_endpoint) that will be used to execute the task with a SQL notebook.
notebookPath This property is required. String
The path of the databricks.Notebook to be run in the Databricks workspace or remote repository. For notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash. For notebooks stored in a remote repository, the path must be relative. This field is required.
baseParameters Map<String>
(Map) Base parameters to be used for each run of this job. If the run is initiated by a call to run-now with parameters specified, the two parameters maps will be merged. If the same key is specified in base_parameters and in run-now, the value from run-now will be used. If the notebook takes a parameter that is not specified in the job’s base_parameters or the run-now override parameters, the default value from the notebook will be used. Retrieve these parameters in a notebook using dbutils.widgets.get.
source String
Location type of the notebook, can only be WORKSPACE or GIT. When set to WORKSPACE, the notebook will be retrieved from the local Databricks workspace. When set to GIT, the notebook will be retrieved from a Git repository defined in git_source. If the value is empty, the task will use GIT if git_source is defined and WORKSPACE otherwise.
warehouseId String
ID of the SQL warehouse (databricks_sql_endpoint) that will be used to execute the task with a SQL notebook.
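
A minimal sketch of a parameterized notebook task, assuming a workspace-stored notebook; the cluster ID, notebook path, and parameter values are placeholders. Inside the notebook the values are read with dbutils.widgets.get, and a run-now call with matching keys would override them.

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

// Hypothetical job that runs a workspace notebook with base parameters.
const notebookJob = new databricks.Job("notebook_job", {
    name: "Parameterized notebook job",
    tasks: [{
        taskKey: "ingest",
        existingClusterId: "1234-567890-abcde123", // placeholder cluster ID
        notebookTask: {
            notebookPath: "/Shared/etl/ingest", // absolute workspace path (placeholder)
            source: "WORKSPACE",
            baseParameters: {
                // read in the notebook with dbutils.widgets.get("target_date")
                target_date: "2025-01-01",
                environment: "staging",
            },
        },
    }],
});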

JobTaskNotificationSettings
, JobTaskNotificationSettingsArgs

AlertOnLastAttempt bool
(Bool) do not send notifications to recipients specified in on_start for the retried runs and do not send notifications to recipients specified in on_failure until the last retry of the run.
NoAlertForCanceledRuns bool

(Bool) don't send alert for cancelled runs.

The following parameter is only available on task level.

NoAlertForSkippedRuns bool
(Bool) don't send alert for skipped runs.
AlertOnLastAttempt bool
(Bool) do not send notifications to recipients specified in on_start for the retried runs and do not send notifications to recipients specified in on_failure until the last retry of the run.
NoAlertForCanceledRuns bool

(Bool) don't send alert for cancelled runs.

The following parameter is only available on task level.

NoAlertForSkippedRuns bool
(Bool) don't send alert for skipped runs.
alertOnLastAttempt Boolean
(Bool) do not send notifications to recipients specified in on_start for the retried runs and do not send notifications to recipients specified in on_failure until the last retry of the run.
noAlertForCanceledRuns Boolean

(Bool) don't send alert for cancelled runs.

The following parameter is only available on task level.

noAlertForSkippedRuns Boolean
(Bool) don't send alert for skipped runs.
alertOnLastAttempt boolean
(Bool) do not send notifications to recipients specified in on_start for the retried runs and do not send notifications to recipients specified in on_failure until the last retry of the run.
noAlertForCanceledRuns boolean

(Bool) don't send alert for cancelled runs.

The following parameter is only available on task level.

noAlertForSkippedRuns boolean
(Bool) don't send alert for skipped runs.
alert_on_last_attempt bool
(Bool) do not send notifications to recipients specified in on_start for the retried runs and do not send notifications to recipients specified in on_failure until the last retry of the run.
no_alert_for_canceled_runs bool

(Bool) don't send alert for cancelled runs.

The following parameter is only available on task level.

no_alert_for_skipped_runs bool
(Bool) don't send alert for skipped runs.
alertOnLastAttempt Boolean
(Bool) do not send notifications to recipients specified in on_start for the retried runs and do not send notifications to recipients specified in on_failure until the last retry of the run.
noAlertForCanceledRuns Boolean

(Bool) don't send alert for cancelled runs.

The following parameter is only available on task level.

noAlertForSkippedRuns Boolean
(Bool) don't send alert for skipped runs.
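
A hedged sketch of task-level notification tuning; the notificationSettings and emailNotifications property names (and the onFailures recipient field) are assumed from the type names on this page, and the email address and cluster ID are placeholders.

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

// Hypothetical task that only alerts the on-call address after the final retry.
const notifiedJob = new databricks.Job("notified_job", {
    name: "Job with tuned notifications",
    tasks: [{
        taskKey: "nightly",
        existingClusterId: "1234-567890-abcde123", // placeholder cluster ID
        notebookTask: {
            notebookPath: "/Shared/etl/nightly", // placeholder notebook path
        },
        emailNotifications: {
            onFailures: ["oncall@example.com"], // assumed recipient field name; placeholder address
        },
        notificationSettings: {
            alertOnLastAttempt: true,    // suppress on_failure notifications until the last retry
            noAlertForCanceledRuns: true,
            noAlertForSkippedRuns: true, // only available at the task level
        },
    }],
});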

JobTaskPipelineTask
, JobTaskPipelineTaskArgs

PipelineId This property is required. string
The pipeline's unique ID.
FullRefresh bool

(Bool) Specifies if there should be full refresh of the pipeline.

The following configuration blocks are only supported inside a task block

PipelineId This property is required. string
The pipeline's unique ID.
FullRefresh bool

(Bool) Specifies if there should be full refresh of the pipeline.

The following configuration blocks are only supported inside a task block

pipelineId This property is required. String
The pipeline's unique ID.
fullRefresh Boolean

(Bool) Specifies if there should be full refresh of the pipeline.

The following configuration blocks are only supported inside a task block

pipelineId This property is required. string
The pipeline's unique ID.
fullRefresh boolean

(Bool) Specifies if there should be full refresh of the pipeline.

The following configuration blocks are only supported inside a task block

pipeline_id This property is required. str
The pipeline's unique ID.
full_refresh bool

(Bool) Specifies if there should be full refresh of the pipeline.

The following configuration blocks are only supported inside a task block

pipelineId This property is required. String
The pipeline's unique ID.
fullRefresh Boolean

(Bool) Specifies if there should be full refresh of the pipeline.

The following configuration blocks are only supported inside a task block
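
A minimal sketch of a pipeline task; the pipeline ID is a placeholder and would normally be referenced from a databricks.Pipeline resource.

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

// Hypothetical job that refreshes a pipeline.
const pipelineJob = new databricks.Job("pipeline_job", {
    name: "Pipeline refresh job",
    tasks: [{
        taskKey: "refresh",
        pipelineTask: {
            pipelineId: "00000000-0000-0000-0000-000000000000", // placeholder pipeline ID
            fullRefresh: false, // set to true to recompute all tables in the pipeline
        },
    }],
});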

JobTaskPythonWheelTask
, JobTaskPythonWheelTaskArgs

EntryPoint string
Python function as entry point for the task
NamedParameters Dictionary<string, string>
Named parameters for the task
PackageName string
Name of Python package
Parameters List<string>
Parameters for the task
EntryPoint string
Python function as entry point for the task
NamedParameters map[string]string
Named parameters for the task
PackageName string
Name of Python package
Parameters []string
Parameters for the task
entryPoint String
Python function as entry point for the task
namedParameters Map<String,String>
Named parameters for the task
packageName String
Name of Python package
parameters List<String>
Parameters for the task
entryPoint string
Python function as entry point for the task
namedParameters {[key: string]: string}
Named parameters for the task
packageName string
Name of Python package
parameters string[]
Parameters for the task
entry_point str
Python function as entry point for the task
named_parameters Mapping[str, str]
Named parameters for the task
package_name str
Name of Python package
parameters Sequence[str]
Parameters for the task
entryPoint String
Python function as entry point for the task
namedParameters Map<String>
Named parameters for the task
packageName String
Name of Python package
parameters List<String>
Parameters for the task
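
For illustration, a sketch of a Python wheel task; the wheel location (and the whl library field it is attached with), package name, and entry point are placeholders or assumptions, not values from this page.

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

// Hypothetical task that calls an entry point from an uploaded wheel.
const wheelJob = new databricks.Job("wheel_job", {
    name: "Python wheel job",
    tasks: [{
        taskKey: "build_features",
        existingClusterId: "1234-567890-abcde123", // placeholder cluster ID
        libraries: [{
            whl: "dbfs:/FileStore/wheels/my_package-0.1.0-py3-none-any.whl", // assumed field name; placeholder path
        }],
        pythonWheelTask: {
            packageName: "my_package", // placeholder package name
            entryPoint: "main",        // console-script entry point defined by the package
            namedParameters: {
                environment: "staging",
            },
        },
    }],
});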

JobTaskRunJobTask
, JobTaskRunJobTaskArgs

JobId This property is required. int
(Integer) ID of the job to trigger.
DbtCommands List<string>
JarParams List<string>
JobParameters Dictionary<string, string>
(Map) Job parameters for the task
NotebookParams Dictionary<string, string>
PipelineParams JobTaskRunJobTaskPipelineParams
PythonNamedParams Dictionary<string, string>
PythonParams List<string>
SparkSubmitParams List<string>
SqlParams Dictionary<string, string>
JobId This property is required. int
(Integer) ID of the job to trigger.
DbtCommands []string
JarParams []string
JobParameters map[string]string
(Map) Job parameters for the task
NotebookParams map[string]string
PipelineParams JobTaskRunJobTaskPipelineParams
PythonNamedParams map[string]string
PythonParams []string
SparkSubmitParams []string
SqlParams map[string]string
jobId This property is required. Integer
(Integer) ID of the job to trigger.
dbtCommands List<String>
jarParams List<String>
jobParameters Map<String,String>
(Map) Job parameters for the task
notebookParams Map<String,String>
pipelineParams JobTaskRunJobTaskPipelineParams
pythonNamedParams Map<String,String>
pythonParams List<String>
sparkSubmitParams List<String>
sqlParams Map<String,String>
jobId This property is required. number
(Integer) ID of the job to trigger.
dbtCommands string[]
jarParams string[]
jobParameters {[key: string]: string}
(Map) Job parameters for the task
notebookParams {[key: string]: string}
pipelineParams JobTaskRunJobTaskPipelineParams
pythonNamedParams {[key: string]: string}
pythonParams string[]
sparkSubmitParams string[]
sqlParams {[key: string]: string}
job_id This property is required. int
(Integer) ID of the job to trigger.
dbt_commands Sequence[str]
jar_params Sequence[str]
job_parameters Mapping[str, str]
(Map) Job parameters for the task
notebook_params Mapping[str, str]
pipeline_params JobTaskRunJobTaskPipelineParams
python_named_params Mapping[str, str]
python_params Sequence[str]
spark_submit_params Sequence[str]
sql_params Mapping[str, str]
jobId This property is required. Number
(Integer) ID of the job to trigger.
dbtCommands List<String>
jarParams List<String>
jobParameters Map<String>
(Map) Job parameters for the task
notebookParams Map<String>
pipelineParams Property Map
pythonNamedParams Map<String>
pythonParams List<String>
sparkSubmitParams List<String>
sqlParams Map<String>
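
A minimal sketch of a task that triggers another job; the numeric job ID and the parameter values are placeholders.

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

// Hypothetical orchestrator task that triggers a downstream job with parameters.
const orchestratorJob = new databricks.Job("orchestrator", {
    name: "Orchestrator job",
    tasks: [{
        taskKey: "trigger_downstream",
        runJobTask: {
            jobId: 123, // placeholder ID of the job to trigger
            jobParameters: {
                run_date: "2025-01-01",
                mode: "incremental",
            },
        },
    }],
});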

JobTaskRunJobTaskPipelineParams
, JobTaskRunJobTaskPipelineParamsArgs

FullRefresh bool

(Bool) Specifies if there should be full refresh of the pipeline.

The following configuration blocks are only supported inside a task block

FullRefresh bool

(Bool) Specifies if there should be full refresh of the pipeline.

The following configuration blocks are only supported inside a task block

fullRefresh Boolean

(Bool) Specifies if there should be full refresh of the pipeline.

The following configuration blocks are only supported inside a task block

fullRefresh boolean

(Bool) Specifies if there should be full refresh of the pipeline.

The following configuration blocks are only supported inside a task block

full_refresh bool

(Bool) Specifies if there should be full refresh of the pipeline.

The following configuration blocks are only supported inside a task block

fullRefresh Boolean

(Bool) Specifies if there should be full refresh of the pipeline.

The following configuration blocks are only supported inside a task block

JobTaskSparkJarTask
, JobTaskSparkJarTaskArgs

JarUri string
MainClassName string
The full name of the class containing the main method to be executed. This class must be contained in a JAR provided as a library. The code should use SparkContext.getOrCreate to obtain a Spark context; otherwise, runs of the job will fail.
Parameters List<string>
(List) Parameters passed to the main method.
RunAsRepl bool
JarUri string
MainClassName string
The full name of the class containing the main method to be executed. This class must be contained in a JAR provided as a library. The code should use SparkContext.getOrCreate to obtain a Spark context; otherwise, runs of the job will fail.
Parameters []string
(List) Parameters passed to the main method.
RunAsRepl bool
jarUri String
mainClassName String
The full name of the class containing the main method to be executed. This class must be contained in a JAR provided as a library. The code should use SparkContext.getOrCreate to obtain a Spark context; otherwise, runs of the job will fail.
parameters List<String>
(List) Parameters passed to the main method.
runAsRepl Boolean
jarUri string
mainClassName string
The full name of the class containing the main method to be executed. This class must be contained in a JAR provided as a library. The code should use SparkContext.getOrCreate to obtain a Spark context; otherwise, runs of the job will fail.
parameters string[]
(List) Parameters passed to the main method.
runAsRepl boolean
jar_uri str
main_class_name str
The full name of the class containing the main method to be executed. This class must be contained in a JAR provided as a library. The code should use SparkContext.getOrCreate to obtain a Spark context; otherwise, runs of the job will fail.
parameters Sequence[str]
(List) Parameters passed to the main method.
run_as_repl bool
jarUri String
mainClassName String
The full name of the class containing the main method to be executed. This class must be contained in a JAR provided as a library. The code should use SparkContext.getOrCreate to obtain a Spark context; otherwise, runs of the job will fail.
parameters List<String>
(List) Parameters passed to the main method.
runAsRepl Boolean
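
A hedged sketch of a JAR task; the JAR is assumed to be attached through the task-level libraries argument (the jar field name is an assumption), and the cluster ID, JAR path, and parameters are placeholders.

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

// Hypothetical task that runs a main class from an attached JAR.
const jarJob = new databricks.Job("jar_job", {
    name: "Spark JAR job",
    tasks: [{
        taskKey: "run_main",
        existingClusterId: "1234-567890-abcde123", // placeholder cluster ID
        libraries: [{
            jar: "dbfs:/FileStore/jars/etl-assembly-1.0.jar", // assumed field name; placeholder path
        }],
        sparkJarTask: {
            mainClassName: "com.acme.data.Main", // the class must obtain its context via SparkContext.getOrCreate
            parameters: ["--date", "2025-01-01"],
        },
    }],
});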

JobTaskSparkPythonTask
, JobTaskSparkPythonTaskArgs

PythonFile This property is required. string
The URI of the Python file to be executed. databricks_dbfs_file, cloud file URIs (e.g. s3:/, abfss:/, gs:/), workspace paths and remote repository are supported. For Python files stored in the Databricks workspace, the path must be absolute and begin with /Repos. For files stored in a remote repository, the path must be relative. This field is required.
Parameters List<string>
(List) Command line parameters passed to the Python file.
Source string
Location type of the Python file, can only be GIT. When set to GIT, the Python file will be retrieved from a Git repository defined in git_source.
PythonFile This property is required. string
The URI of the Python file to be executed. databricks_dbfs_file, cloud file URIs (e.g. s3:/, abfss:/, gs:/), workspace paths and remote repository are supported. For Python files stored in the Databricks workspace, the path must be absolute and begin with /Repos. For files stored in a remote repository, the path must be relative. This field is required.
Parameters []string
(List) Command line parameters passed to the Python file.
Source string
Location type of the Python file, can only be GIT. When set to GIT, the Python file will be retrieved from a Git repository defined in git_source.
pythonFile This property is required. String
The URI of the Python file to be executed. databricks_dbfs_file, cloud file URIs (e.g. s3:/, abfss:/, gs:/), workspace paths and remote repository are supported. For Python files stored in the Databricks workspace, the path must be absolute and begin with /Repos. For files stored in a remote repository, the path must be relative. This field is required.
parameters List<String>
(List) Command line parameters passed to the Python file.
source String
Location type of the Python file, can only be GIT. When set to GIT, the Python file will be retrieved from a Git repository defined in git_source.
pythonFile This property is required. string
The URI of the Python file to be executed. databricks_dbfs_file, cloud file URIs (e.g. s3:/, abfss:/, gs:/), workspace paths and remote repository are supported. For Python files stored in the Databricks workspace, the path must be absolute and begin with /Repos. For files stored in a remote repository, the path must be relative. This field is required.
parameters string[]
(List) Command line parameters passed to the Python file.
source string
Location type of the Python file, can only be GIT. When set to GIT, the Python file will be retrieved from a Git repository defined in git_source.
python_file This property is required. str
The URI of the Python file to be executed. databricks_dbfs_file, cloud file URIs (e.g. s3:/, abfss:/, gs:/), workspace paths and remote repository are supported. For Python files stored in the Databricks workspace, the path must be absolute and begin with /Repos. For files stored in a remote repository, the path must be relative. This field is required.
parameters Sequence[str]
(List) Command line parameters passed to the Python file.
source str
Location type of the Python file, can only be GIT. When set to GIT, the Python file will be retrieved from a Git repository defined in git_source.
pythonFile This property is required. String
The URI of the Python file to be executed. databricks_dbfs_file, cloud file URIs (e.g. s3:/, abfss:/, gs:/), workspace paths and remote repository are supported. For Python files stored in the Databricks workspace, the path must be absolute and begin with /Repos. For files stored in a remote repository, the path must be relative. This field is required.
parameters List<String>
(List) Command line parameters passed to the Python file.
source String
Location type of the Python file, can only be GIT. When set to GIT, the Python file will be retrieved from a Git repository defined in git_source.
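
A minimal sketch of a Spark Python task using a cloud file URI; the bucket, script path, and parameters are placeholders.

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

// Hypothetical task that runs a Python script stored in object storage.
const pySparkJob = new databricks.Job("py_spark_job", {
    name: "Spark Python job",
    tasks: [{
        taskKey: "aggregate",
        existingClusterId: "1234-567890-abcde123", // placeholder cluster ID
        sparkPythonTask: {
            pythonFile: "s3://my-bucket/jobs/aggregate.py", // placeholder URI; workspace paths must begin with /Repos
            parameters: ["--output", "s3://my-bucket/output/"],
        },
    }],
});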

JobTaskSparkSubmitTask
, JobTaskSparkSubmitTaskArgs

Parameters List<string>
(List) Command-line parameters passed to spark submit.
Parameters []string
(List) Command-line parameters passed to spark submit.
parameters List<String>
(List) Command-line parameters passed to spark submit.
parameters string[]
(List) Command-line parameters passed to spark submit.
parameters Sequence[str]
(List) Command-line parameters passed to spark submit.
parameters List<String>
(List) Command-line parameters passed to spark submit.
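
A minimal sketch of a spark-submit task on a fresh cluster; the example class, JAR location, and cluster settings are placeholders.

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

// Hypothetical task that runs SparkPi through spark-submit.
const submitJob = new databricks.Job("submit_job", {
    name: "Spark submit job",
    tasks: [{
        taskKey: "pi",
        newCluster: {
            numWorkers: 1,
            sparkVersion: "15.4.x-scala2.12", // placeholder runtime version
            nodeTypeId: "i3.xlarge",          // placeholder node type
        },
        sparkSubmitTask: {
            parameters: [
                "--class", "org.apache.spark.examples.SparkPi",
                "dbfs:/FileStore/jars/spark-examples.jar", // placeholder JAR location
                "10",
            ],
        },
    }],
});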

JobTaskSqlTask
, JobTaskSqlTaskArgs

WarehouseId This property is required. string
ID of the SQL warehouse (databricks_sql_endpoint) that will be used to execute the task. Only Serverless and Pro warehouses are currently supported.
Alert JobTaskSqlTaskAlert
block consisting of the following fields:
Dashboard JobTaskSqlTaskDashboard
block consisting of the following fields:
File JobTaskSqlTaskFile
block consisting of the string fields described below:
Parameters Dictionary<string, string>
(Map) parameters to be used for each run of this task. The SQL alert task does not support custom parameters.
Query JobTaskSqlTaskQuery
block consisting of a single string field: query_id - identifier of the Databricks Query (databricks_query).
WarehouseId This property is required. string
ID of the SQL warehouse (databricks_sql_endpoint) that will be used to execute the task. Only Serverless and Pro warehouses are currently supported.
Alert JobTaskSqlTaskAlert
block consisting of the following fields:
Dashboard JobTaskSqlTaskDashboard
block consisting of the following fields:
File JobTaskSqlTaskFile
block consisting of the string fields described below:
Parameters map[string]string
(Map) parameters to be used for each run of this task. The SQL alert task does not support custom parameters.
Query JobTaskSqlTaskQuery
block consisting of a single string field: query_id - identifier of the Databricks Query (databricks_query).
warehouseId This property is required. String
ID of the SQL warehouse (databricks_sql_endpoint) that will be used to execute the task. Only Serverless and Pro warehouses are currently supported.
alert JobTaskSqlTaskAlert
block consisting of the following fields:
dashboard JobTaskSqlTaskDashboard
block consisting of the following fields:
file JobTaskSqlTaskFile
block consisting of the string fields described below:
parameters Map<String,String>
(Map) parameters to be used for each run of this task. The SQL alert task does not support custom parameters.
query JobTaskSqlTaskQuery
block consisting of a single string field: query_id - identifier of the Databricks Query (databricks_query).
warehouseId This property is required. string
ID of the SQL warehouse (databricks_sql_endpoint) that will be used to execute the task. Only Serverless and Pro warehouses are currently supported.
alert JobTaskSqlTaskAlert
block consisting of the following fields:
dashboard JobTaskSqlTaskDashboard
block consisting of the following fields:
file JobTaskSqlTaskFile
block consisting of the string fields described below:
parameters {[key: string]: string}
(Map) parameters to be used for each run of this task. The SQL alert task does not support custom parameters.
query JobTaskSqlTaskQuery
block consisting of a single string field: query_id - identifier of the Databricks Query (databricks_query).
warehouse_id This property is required. str
ID of the SQL warehouse (databricks_sql_endpoint) that will be used to execute the task. Only Serverless and Pro warehouses are currently supported.
alert JobTaskSqlTaskAlert
block consisting of the following fields:
dashboard JobTaskSqlTaskDashboard
block consisting of the following fields:
file JobTaskSqlTaskFile
block consisting of the string fields described below:
parameters Mapping[str, str]
(Map) parameters to be used for each run of this task. The SQL alert task does not support custom parameters.
query JobTaskSqlTaskQuery
block consisting of a single string field: query_id - identifier of the Databricks Query (databricks_query).
warehouseId This property is required. String
ID of the SQL warehouse (databricks_sql_endpoint) that will be used to execute the task. Only Serverless and Pro warehouses are currently supported.
alert Property Map
block consisting of the following fields:
dashboard Property Map
block consisting of the following fields:
file Property Map
block consisting of the string fields described below:
parameters Map<String>
(Map) parameters to be used for each run of this task. The SQL alert task does not support custom parameters.
query Property Map
block consisting of a single string field: query_id - identifier of the Databricks Query (databricks_query).

JobTaskSqlTaskAlert
, JobTaskSqlTaskAlertArgs

AlertId This property is required. string
(String) identifier of the Databricks Alert (databricks_alert).
PauseSubscriptions bool
flag that specifies whether subscriptions are paused.
Subscriptions List<JobTaskSqlTaskAlertSubscription>
a list of subscription blocks, each consisting of one of the required fields: user_name for user emails or destination_id for the Alert destination's identifier.
AlertId This property is required. string
(String) identifier of the Databricks Alert (databricks_alert).
PauseSubscriptions bool
flag that specifies whether subscriptions are paused.
Subscriptions []JobTaskSqlTaskAlertSubscription
a list of subscription blocks, each consisting of one of the required fields: user_name for user emails or destination_id for the Alert destination's identifier.
alertId This property is required. String
(String) identifier of the Databricks Alert (databricks_alert).
pauseSubscriptions Boolean
flag that specifies whether subscriptions are paused.
subscriptions List<JobTaskSqlTaskAlertSubscription>
a list of subscription blocks, each consisting of one of the required fields: user_name for user emails or destination_id for the Alert destination's identifier.
alertId This property is required. string
(String) identifier of the Databricks Alert (databricks_alert).
pauseSubscriptions boolean
flag that specifies whether subscriptions are paused.
subscriptions JobTaskSqlTaskAlertSubscription[]
a list of subscription blocks, each consisting of one of the required fields: user_name for user emails or destination_id for the Alert destination's identifier.
alert_id This property is required. str
(String) identifier of the Databricks Alert (databricks_alert).
pause_subscriptions bool
flag that specifies whether subscriptions are paused.
subscriptions Sequence[JobTaskSqlTaskAlertSubscription]
a list of subscription blocks, each consisting of one of the required fields: user_name for user emails or destination_id for the Alert destination's identifier.
alertId This property is required. String
(String) identifier of the Databricks Alert (databricks_alert).
pauseSubscriptions Boolean
flag that specifies whether subscriptions are paused.
subscriptions List<Property Map>
a list of subscription blocks, each consisting of one of the required fields: user_name for user emails or destination_id for the Alert destination's identifier.

JobTaskSqlTaskAlertSubscription
, JobTaskSqlTaskAlertSubscriptionArgs

DestinationId string
UserName string
The email of an active workspace user. Non-admin users can only set this field to their own email.
DestinationId string
UserName string
The email of an active workspace user. Non-admin users can only set this field to their own email.
destinationId String
userName String
The email of an active workspace user. Non-admin users can only set this field to their own email.
destinationId string
userName string
The email of an active workspace user. Non-admin users can only set this field to their own email.
destination_id str
user_name str
The email of an active workspace user. Non-admin users can only set this field to their own email.
destinationId String
userName String
The email of an active workspace user. Non-admin users can only set this field to their own email.

JobTaskSqlTaskDashboard
, JobTaskSqlTaskDashboardArgs

DashboardId This property is required. string
(String) identifier of the Databricks SQL Dashboard databricks_sql_dashboard.
CustomSubject string
string specifying a custom subject for the email sent.
PauseSubscriptions bool
flag that specifies whether subscriptions are paused.
Subscriptions List<JobTaskSqlTaskDashboardSubscription>
a list of subscription blocks, each consisting of one of the required fields: user_name for user emails or destination_id for the Alert destination's identifier.
DashboardId This property is required. string
(String) identifier of the Databricks SQL Dashboard databricks_sql_dashboard.
CustomSubject string
string specifying a custom subject for the email sent.
PauseSubscriptions bool
flag that specifies whether subscriptions are paused.
Subscriptions []JobTaskSqlTaskDashboardSubscription
a list of subscription blocks, each consisting of one of the required fields: user_name for user emails or destination_id for the Alert destination's identifier.
dashboardId This property is required. String
(String) identifier of the Databricks SQL Dashboard databricks_sql_dashboard.
customSubject String
string specifying a custom subject for the email sent.
pauseSubscriptions Boolean
flag that specifies whether subscriptions are paused.
subscriptions List<JobTaskSqlTaskDashboardSubscription>
a list of subscription blocks, each consisting of one of the required fields: user_name for user emails or destination_id for the Alert destination's identifier.
dashboardId This property is required. string
(String) identifier of the Databricks SQL Dashboard databricks_sql_dashboard.
customSubject string
string specifying a custom subject for the email sent.
pauseSubscriptions boolean
flag that specifies whether subscriptions are paused.
subscriptions JobTaskSqlTaskDashboardSubscription[]
a list of subscription blocks, each consisting of one of the required fields: user_name for user emails or destination_id for the Alert destination's identifier.
dashboard_id This property is required. str
(String) identifier of the Databricks SQL Dashboard databricks_sql_dashboard.
custom_subject str
string specifying a custom subject for the email sent.
pause_subscriptions bool
flag that specifies whether subscriptions are paused.
subscriptions Sequence[JobTaskSqlTaskDashboardSubscription]
a list of subscription blocks, each consisting of one of the required fields: user_name for user emails or destination_id for the Alert destination's identifier.
dashboardId This property is required. String
(String) identifier of the Databricks SQL Dashboard databricks_sql_dashboard.
customSubject String
string specifying a custom subject for the email sent.
pauseSubscriptions Boolean
flag that specifies whether subscriptions are paused.
subscriptions List<Property Map>
a list of subscription blocks, each consisting of one of the required fields: user_name for user emails or destination_id for the Alert destination's identifier.

JobTaskSqlTaskDashboardSubscription
, JobTaskSqlTaskDashboardSubscriptionArgs

DestinationId string
UserName string
The email of an active workspace user. Non-admin users can only set this field to their own email.
DestinationId string
UserName string
The email of an active workspace user. Non-admin users can only set this field to their own email.
destinationId String
userName String
The email of an active workspace user. Non-admin users can only set this field to their own email.
destinationId string
userName string
The email of an active workspace user. Non-admin users can only set this field to their own email.
destination_id str
user_name str
The email of an active workspace user. Non-admin users can only set this field to their own email.
destinationId String
userName String
The email of an active workspace user. Non-admin users can only set this field to their own email.

JobTaskSqlTaskFile
, JobTaskSqlTaskFileArgs

Path This property is required. string

If source is GIT: Relative path to the file in the repository specified in the git_source block with SQL commands to execute. If source is WORKSPACE: Absolute path to the file in the workspace with SQL commands to execute.

Example

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

const sqlAggregationJob = new databricks.Job("sql_aggregation_job", {
    name: "Example SQL Job",
    tasks: [
        {
            taskKey: "run_agg_query",
            sqlTask: {
                warehouseId: sqlJobWarehouse.id,
                query: {
                    queryId: aggQuery.id,
                },
            },
        },
        {
            taskKey: "run_dashboard",
            sqlTask: {
                warehouseId: sqlJobWarehouse.id,
                dashboard: {
                    dashboardId: dash.id,
                    subscriptions: [{
                        userName: "user@domain.com",
                    }],
                },
            },
        },
        {
            taskKey: "run_alert",
            sqlTask: {
                warehouseId: sqlJobWarehouse.id,
                alert: {
                    alertId: alert.id,
                    subscriptions: [{
                        userName: "user@domain.com",
                    }],
                },
            },
        },
    ],
});

import pulumi
import pulumi_databricks as databricks

sql_aggregation_job = databricks.Job("sql_aggregation_job",
    name="Example SQL Job",
    tasks=[
        {
            "task_key": "run_agg_query",
            "sql_task": {
                "warehouse_id": sql_job_warehouse["id"],
                "query": {
                    "query_id": agg_query["id"],
                },
            },
        },
        {
            "task_key": "run_dashboard",
            "sql_task": {
                "warehouse_id": sql_job_warehouse["id"],
                "dashboard": {
                    "dashboard_id": dash["id"],
                    "subscriptions": [{
                        "user_name": "user@domain.com",
                    }],
                },
            },
        },
        {
            "task_key": "run_alert",
            "sql_task": {
                "warehouse_id": sql_job_warehouse["id"],
                "alert": {
                    "alert_id": alert["id"],
                    "subscriptions": [{
                        "user_name": "user@domain.com",
                    }],
                },
            },
        },
    ])
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;

return await Deployment.RunAsync(() => 
{
    var sqlAggregationJob = new Databricks.Job("sql_aggregation_job", new()
    {
        Name = "Example SQL Job",
        Tasks = new[]
        {
            new Databricks.Inputs.JobTaskArgs
            {
                TaskKey = "run_agg_query",
                SqlTask = new Databricks.Inputs.JobTaskSqlTaskArgs
                {
                    WarehouseId = sqlJobWarehouse.Id,
                    Query = new Databricks.Inputs.JobTaskSqlTaskQueryArgs
                    {
                        QueryId = aggQuery.Id,
                    },
                },
            },
            new Databricks.Inputs.JobTaskArgs
            {
                TaskKey = "run_dashboard",
                SqlTask = new Databricks.Inputs.JobTaskSqlTaskArgs
                {
                    WarehouseId = sqlJobWarehouse.Id,
                    Dashboard = new Databricks.Inputs.JobTaskSqlTaskDashboardArgs
                    {
                        DashboardId = dash.Id,
                        Subscriptions = new[]
                        {
                            new Databricks.Inputs.JobTaskSqlTaskDashboardSubscriptionArgs
                            {
                                UserName = "user@domain.com",
                            },
                        },
                    },
                },
            },
            new Databricks.Inputs.JobTaskArgs
            {
                TaskKey = "run_alert",
                SqlTask = new Databricks.Inputs.JobTaskSqlTaskArgs
                {
                    WarehouseId = sqlJobWarehouse.Id,
                    Alert = new Databricks.Inputs.JobTaskSqlTaskAlertArgs
                    {
                        AlertId = alert.Id,
                        Subscriptions = new[]
                        {
                            new Databricks.Inputs.JobTaskSqlTaskAlertSubscriptionArgs
                            {
                                UserName = "user@domain.com",
                            },
                        },
                    },
                },
            },
        },
    });

});
package main

import (
	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := databricks.NewJob(ctx, "sql_aggregation_job", &databricks.JobArgs{
			Name: pulumi.String("Example SQL Job"),
			Tasks: databricks.JobTaskArray{
				&databricks.JobTaskArgs{
					TaskKey: pulumi.String("run_agg_query"),
					SqlTask: &databricks.JobTaskSqlTaskArgs{
						WarehouseId: pulumi.Any(sqlJobWarehouse.Id),
						Query: &databricks.JobTaskSqlTaskQueryArgs{
							QueryId: pulumi.Any(aggQuery.Id),
						},
					},
				},
				&databricks.JobTaskArgs{
					TaskKey: pulumi.String("run_dashboard"),
					SqlTask: &databricks.JobTaskSqlTaskArgs{
						WarehouseId: pulumi.Any(sqlJobWarehouse.Id),
						Dashboard: &databricks.JobTaskSqlTaskDashboardArgs{
							DashboardId: pulumi.Any(dash.Id),
							Subscriptions: databricks.JobTaskSqlTaskDashboardSubscriptionArray{
								&databricks.JobTaskSqlTaskDashboardSubscriptionArgs{
									UserName: pulumi.String("user@domain.com"),
								},
							},
						},
					},
				},
				&databricks.JobTaskArgs{
					TaskKey: pulumi.String("run_alert"),
					SqlTask: &databricks.JobTaskSqlTaskArgs{
						WarehouseId: pulumi.Any(sqlJobWarehouse.Id),
						Alert: &databricks.JobTaskSqlTaskAlertArgs{
							AlertId: pulumi.Any(alert.Id),
							Subscriptions: databricks.JobTaskSqlTaskAlertSubscriptionArray{
								&databricks.JobTaskSqlTaskAlertSubscriptionArgs{
									UserName: pulumi.String("user@domain.com"),
								},
							},
						},
					},
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.Job;
import com.pulumi.databricks.JobArgs;
import com.pulumi.databricks.inputs.JobTaskArgs;
import com.pulumi.databricks.inputs.JobTaskSqlTaskArgs;
import com.pulumi.databricks.inputs.JobTaskSqlTaskQueryArgs;
import com.pulumi.databricks.inputs.JobTaskSqlTaskDashboardArgs;
import com.pulumi.databricks.inputs.JobTaskSqlTaskAlertArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var sqlAggregationJob = new Job("sqlAggregationJob", JobArgs.builder()
            .name("Example SQL Job")
            .tasks(            
                JobTaskArgs.builder()
                    .taskKey("run_agg_query")
                    .sqlTask(JobTaskSqlTaskArgs.builder()
                        .warehouseId(sqlJobWarehouse.id())
                        .query(JobTaskSqlTaskQueryArgs.builder()
                            .queryId(aggQuery.id())
                            .build())
                        .build())
                    .build(),
                JobTaskArgs.builder()
                    .taskKey("run_dashboard")
                    .sqlTask(JobTaskSqlTaskArgs.builder()
                        .warehouseId(sqlJobWarehouse.id())
                        .dashboard(JobTaskSqlTaskDashboardArgs.builder()
                            .dashboardId(dash.id())
                            .subscriptions(JobTaskSqlTaskDashboardSubscriptionArgs.builder()
                                .userName("user@domain.com")
                                .build())
                            .build())
                        .build())
                    .build(),
                JobTaskArgs.builder()
                    .taskKey("run_alert")
                    .sqlTask(JobTaskSqlTaskArgs.builder()
                        .warehouseId(sqlJobWarehouse.id())
                        .alert(JobTaskSqlTaskAlertArgs.builder()
                            .alertId(alert.id())
                            .subscriptions(JobTaskSqlTaskAlertSubscriptionArgs.builder()
                                .userName("user@domain.com")
                                .build())
                            .build())
                        .build())
                    .build())
            .build());

    }
}
resources:
  sqlAggregationJob:
    type: databricks:Job
    name: sql_aggregation_job
    properties:
      name: Example SQL Job
      tasks:
        - taskKey: run_agg_query
          sqlTask:
            warehouseId: ${sqlJobWarehouse.id}
            query:
              queryId: ${aggQuery.id}
        - taskKey: run_dashboard
          sqlTask:
            warehouseId: ${sqlJobWarehouse.id}
            dashboard:
              dashboardId: ${dash.id}
              subscriptions:
                - userName: user@domain.com
        - taskKey: run_alert
          sqlTask:
            warehouseId: ${sqlJobWarehouse.id}
            alert:
              alertId: ${alert.id}
              subscriptions:
                - userName: user@domain.com
Source string
The source of the project. Possible values are WORKSPACE and GIT.
Path This property is required. string

If source is GIT: Relative path to the file in the repository specified in the git_source block with SQL commands to execute. If source is WORKSPACE: Absolute path to the file in the workspace with SQL commands to execute.

Source string
The source of the project. Possible values are WORKSPACE and GIT.
path This property is required. String

If source is GIT: Relative path to the file in the repository specified in the git_source block with SQL commands to execute. If source is WORKSPACE: Absolute path to the file in the workspace with SQL commands to execute.

source String
The source of the project. Possible values are WORKSPACE and GIT.
path This property is required. string

If source is GIT: Relative path to the file in the repository specified in the git_source block with SQL commands to execute. If source is WORKSPACE: Absolute path to the file in the workspace with SQL commands to execute.

source string
The source of the project. Possible values are WORKSPACE and GIT.
path This property is required. str

If source is GIT: Relative path to the file in the repository specified in the git_source block with SQL commands to execute. If source is WORKSPACE: Absolute path to the file in the workspace with SQL commands to execute.

source str
The source of the project. Possible values are WORKSPACE and GIT.
path This property is required. String

If source is GIT: Relative path to the file in the repository specified in the git_source block with SQL commands to execute. If source is WORKSPACE: Absolute path to the file in the workspace with SQL commands to execute.

source String
The source of the project. Possible values are WORKSPACE and GIT.
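
As an illustrative sketch only (the repository URL, branch, file path, and warehouse ID below are hypothetical placeholders, not values from this page), a file-based SQL task combining path and source might look like the following:

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

// Sketch only: run a SQL file stored in a Git repository on an existing SQL warehouse.
// The repository URL, branch, file path, and warehouse ID are hypothetical placeholders.
const sqlFileJob = new databricks.Job("sql_file_job", {
    name: "SQL file job",
    gitSource: {
        url: "https://github.com/example-org/sql-queries",
        provider: "gitHub",
        branch: "main",
    },
    tasks: [{
        taskKey: "run_sql_file",
        sqlTask: {
            warehouseId: "1234567890abcdef",
            file: {
                source: "GIT",
                path: "queries/agg.sql", // relative to the repository root because source is GIT
            },
        },
    }],
});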

JobTaskSqlTaskQuery
, JobTaskSqlTaskQueryArgs

QueryId This property is required. string
QueryId This property is required. string
queryId This property is required. String
queryId This property is required. string
query_id This property is required. str
queryId This property is required. String

JobTaskWebhookNotifications
, JobTaskWebhookNotificationsArgs

OnDurationWarningThresholdExceededs List<JobTaskWebhookNotificationsOnDurationWarningThresholdExceeded>

(List) list of notification IDs to call when the duration of a run exceeds the threshold specified by the RUN_DURATION_SECONDS metric in the health block.

Note that the id is not to be confused with the name of the alert destination; the id can be retrieved through the API or from the URL of the Databricks UI: https://<workspace host>/sql/destinations/<notification id>?o=<workspace id>.

OnFailures List<JobTaskWebhookNotificationsOnFailure>
(List) list of notification IDs to call when the run fails. A maximum of 3 destinations can be specified.
OnStarts List<JobTaskWebhookNotificationsOnStart>
(List) list of notification IDs to call when the run starts. A maximum of 3 destinations can be specified.
OnStreamingBacklogExceededs List<JobTaskWebhookNotificationsOnStreamingBacklogExceeded>
OnSuccesses List<JobTaskWebhookNotificationsOnSuccess>
(List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified.
OnDurationWarningThresholdExceededs []JobTaskWebhookNotificationsOnDurationWarningThresholdExceeded

(List) list of notification IDs to call when the duration of a run exceeds the threshold specified by the RUN_DURATION_SECONDS metric in the health block.

Note that the id is not to be confused with the name of the alert destination; the id can be retrieved through the API or from the URL of the Databricks UI: https://<workspace host>/sql/destinations/<notification id>?o=<workspace id>.

OnFailures []JobTaskWebhookNotificationsOnFailure
(List) list of notification IDs to call when the run fails. A maximum of 3 destinations can be specified.
OnStarts []JobTaskWebhookNotificationsOnStart
(List) list of notification IDs to call when the run starts. A maximum of 3 destinations can be specified.
OnStreamingBacklogExceededs []JobTaskWebhookNotificationsOnStreamingBacklogExceeded
OnSuccesses []JobTaskWebhookNotificationsOnSuccess
(List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified.
onDurationWarningThresholdExceededs List<JobTaskWebhookNotificationsOnDurationWarningThresholdExceeded>

(List) list of notification IDs to call when the duration of a run exceeds the threshold specified by the RUN_DURATION_SECONDS metric in the health block.

Note that the id is not to be confused with the name of the alert destination; the id can be retrieved through the API or from the URL of the Databricks UI: https://<workspace host>/sql/destinations/<notification id>?o=<workspace id>.

onFailures List<JobTaskWebhookNotificationsOnFailure>
(List) list of notification IDs to call when the run fails. A maximum of 3 destinations can be specified.
onStarts List<JobTaskWebhookNotificationsOnStart>
(List) list of notification IDs to call when the run starts. A maximum of 3 destinations can be specified.
onStreamingBacklogExceededs List<JobTaskWebhookNotificationsOnStreamingBacklogExceeded>
onSuccesses List<JobTaskWebhookNotificationsOnSuccess>
(List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified.
onDurationWarningThresholdExceededs JobTaskWebhookNotificationsOnDurationWarningThresholdExceeded[]

(List) list of notification IDs to call when the duration of a run exceeds the threshold specified by the RUN_DURATION_SECONDS metric in the health block.

Note that the id is not to be confused with the name of the alert destination; the id can be retrieved through the API or from the URL of the Databricks UI: https://<workspace host>/sql/destinations/<notification id>?o=<workspace id>.

onFailures JobTaskWebhookNotificationsOnFailure[]
(List) list of notification IDs to call when the run fails. A maximum of 3 destinations can be specified.
onStarts JobTaskWebhookNotificationsOnStart[]
(List) list of notification IDs to call when the run starts. A maximum of 3 destinations can be specified.
onStreamingBacklogExceededs JobTaskWebhookNotificationsOnStreamingBacklogExceeded[]
onSuccesses JobTaskWebhookNotificationsOnSuccess[]
(List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified.
on_duration_warning_threshold_exceededs Sequence[JobTaskWebhookNotificationsOnDurationWarningThresholdExceeded]

(List) list of notification IDs to call when the duration of a run exceeds the threshold specified by the RUN_DURATION_SECONDS metric in the health block.

Note that the id is not to be confused with the name of the alert destination; the id can be retrieved through the API or from the URL of the Databricks UI: https://<workspace host>/sql/destinations/<notification id>?o=<workspace id>.

on_failures Sequence[JobTaskWebhookNotificationsOnFailure]
(List) list of notification IDs to call when the run fails. A maximum of 3 destinations can be specified.
on_starts Sequence[JobTaskWebhookNotificationsOnStart]
(List) list of notification IDs to call when the run starts. A maximum of 3 destinations can be specified.
on_streaming_backlog_exceededs Sequence[JobTaskWebhookNotificationsOnStreamingBacklogExceeded]
on_successes Sequence[JobTaskWebhookNotificationsOnSuccess]
(List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified.
onDurationWarningThresholdExceededs List<Property Map>

(List) list of notification IDs to call when the duration of a run exceeds the threshold specified by the RUN_DURATION_SECONDS metric in the health block.

Note that the id is not to be confused with the name of the alert destination; the id can be retrieved through the API or from the URL of the Databricks UI: https://<workspace host>/sql/destinations/<notification id>?o=<workspace id>.

onFailures List<Property Map>
(List) list of notification IDs to call when the run fails. A maximum of 3 destinations can be specified.
onStarts List<Property Map>
(List) list of notification IDs to call when the run starts. A maximum of 3 destinations can be specified.
onStreamingBacklogExceededs List<Property Map>
onSuccesses List<Property Map>
(List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified.
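
As a hedged sketch (the notebook path, the notification-destination ID, and the exact health-rule shape are assumptions, not taken from this page), task-level webhook notifications might be wired up like this:

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

// Sketch only: call a webhook destination when the task fails or exceeds a run-duration threshold.
// The notebook path and the destination ID (normally taken from the UI/API) are placeholders,
// and the health rule shape (metric/op/value) is assumed; compute configuration is omitted.
const notifiedJob = new databricks.Job("notified_job", {
    name: "Job with webhook notifications",
    tasks: [{
        taskKey: "main",
        notebookTask: {
            notebookPath: "/Shared/example-notebook",
        },
        health: {
            rules: [{
                metric: "RUN_DURATION_SECONDS", // threshold used by onDurationWarningThresholdExceededs
                op: "GREATER_THAN",
                value: 3600,
            }],
        },
        webhookNotifications: {
            onFailures: [{ id: "00000000-0000-0000-0000-000000000000" }],
            onDurationWarningThresholdExceededs: [{ id: "00000000-0000-0000-0000-000000000000" }],
        },
    }],
});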

JobTaskWebhookNotificationsOnDurationWarningThresholdExceeded
, JobTaskWebhookNotificationsOnDurationWarningThresholdExceededArgs

Id This property is required. string
ID of the job
Id This property is required. string
ID of the job
id This property is required. String
ID of the job
id This property is required. string
ID of the job
id This property is required. str
ID of the job
id This property is required. String
ID of the job

JobTaskWebhookNotificationsOnFailure
, JobTaskWebhookNotificationsOnFailureArgs

Id This property is required. string
ID of the job
Id This property is required. string
ID of the job
id This property is required. String
ID of the job
id This property is required. string
ID of the job
id This property is required. str
ID of the job
id This property is required. String
ID of the job

JobTaskWebhookNotificationsOnStart
, JobTaskWebhookNotificationsOnStartArgs

Id This property is required. string
ID of the job
Id This property is required. string
ID of the job
id This property is required. String
ID of the job
id This property is required. string
ID of the job
id This property is required. str
ID of the job
id This property is required. String
ID of the job

JobTaskWebhookNotificationsOnStreamingBacklogExceeded
, JobTaskWebhookNotificationsOnStreamingBacklogExceededArgs

Id This property is required. string
ID of the job
Id This property is required. string
ID of the job
id This property is required. String
ID of the job
id This property is required. string
ID of the job
id This property is required. str
ID of the job
id This property is required. String
ID of the job

JobTaskWebhookNotificationsOnSuccess
, JobTaskWebhookNotificationsOnSuccessArgs

Id This property is required. string
ID of the job
Id This property is required. string
ID of the job
id This property is required. String
ID of the job
id This property is required. string
ID of the job
id This property is required. str
ID of the job
id This property is required. String
ID of the job

JobTrigger
, JobTriggerArgs

FileArrival JobTriggerFileArrival
configuration block to define a trigger for File Arrival events, consisting of the following attributes:
PauseStatus string
Indicates whether this trigger is paused. Either PAUSED or UNPAUSED. When the pause_status field is omitted in the block, the server defaults to UNPAUSED.
Periodic JobTriggerPeriodic
configuration block to define a trigger for Periodic Triggers consisting of the following attributes:
Table JobTriggerTable
TableUpdate JobTriggerTableUpdate
FileArrival JobTriggerFileArrival
configuration block to define a trigger for File Arrival events, consisting of the following attributes:
PauseStatus string
Indicates whether this trigger is paused. Either PAUSED or UNPAUSED. When the pause_status field is omitted in the block, the server defaults to UNPAUSED.
Periodic JobTriggerPeriodic
configuration block to define a trigger for Periodic Triggers consisting of the following attributes:
Table JobTriggerTable
TableUpdate JobTriggerTableUpdate
fileArrival JobTriggerFileArrival
configuration block to define a trigger for File Arrival events, consisting of the following attributes:
pauseStatus String
Indicates whether this trigger is paused. Either PAUSED or UNPAUSED. When the pause_status field is omitted in the block, the server defaults to UNPAUSED.
periodic JobTriggerPeriodic
configuration block to define a trigger for Periodic Triggers consisting of the following attributes:
table JobTriggerTable
tableUpdate JobTriggerTableUpdate
fileArrival JobTriggerFileArrival
configuration block to define a trigger for File Arrival events, consisting of the following attributes:
pauseStatus string
Indicates whether this trigger is paused. Either PAUSED or UNPAUSED. When the pause_status field is omitted in the block, the server defaults to UNPAUSED.
periodic JobTriggerPeriodic
configuration block to define a trigger for Periodic Triggers consisting of the following attributes:
table JobTriggerTable
tableUpdate JobTriggerTableUpdate
file_arrival JobTriggerFileArrival
configuration block to define a trigger for File Arrival events, consisting of the following attributes:
pause_status str
Indicates whether this trigger is paused. Either PAUSED or UNPAUSED. When the pause_status field is omitted in the block, the server defaults to UNPAUSED.
periodic JobTriggerPeriodic
configuration block to define a trigger for Periodic Triggers consisting of the following attributes:
table JobTriggerTable
table_update JobTriggerTableUpdate
fileArrival Property Map
configuration block to define a trigger for File Arrival events, consisting of the following attributes:
pauseStatus String
Indicates whether this trigger is paused. Either PAUSED or UNPAUSED. When the pause_status field is omitted in the block, the server defaults to UNPAUSED.
periodic Property Map
configuration block to define a trigger for Periodic Triggers consisting of the following attributes:
table Property Map
tableUpdate Property Map
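
As an illustrative sketch (the table name and notebook path are hypothetical, and the tableUpdate attributes are detailed further below), a trigger that fires when specific tables are updated might look like:

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

// Sketch only: trigger a run when the listed tables receive updates.
const tableTriggeredJob = new databricks.Job("table_triggered_job", {
    name: "Table update triggered job",
    trigger: {
        pauseStatus: "UNPAUSED",
        tableUpdate: {
            tableNames: ["main.sales.orders"],  // hypothetical Unity Catalog table
            waitAfterLastChangeSeconds: 120,    // let a batch of updates settle first
        },
    },
    tasks: [{
        taskKey: "refresh_aggregates",
        notebookTask: {
            notebookPath: "/Shared/refresh-aggregates", // hypothetical path
        },
    }],
});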

JobTriggerFileArrival
, JobTriggerFileArrivalArgs

Url This property is required. string
URL to be monitored for file arrivals. The path must point to the root or a subpath of the external location. Please note that the URL must have a trailing slash character (/).
MinTimeBetweenTriggersSeconds int
If set, the trigger starts a run only after the specified amount of time has passed since the last time the trigger fired. The minimum allowed value is 60 seconds.
WaitAfterLastChangeSeconds int
If set, the trigger starts a run only after no file activity has occurred for the specified amount of time. This makes it possible to wait for a batch of incoming files to arrive before triggering a run. The minimum allowed value is 60 seconds.
Url This property is required. string
URL to be monitored for file arrivals. The path must point to the root or a subpath of the external location. Please note that the URL must have a trailing slash character (/).
MinTimeBetweenTriggersSeconds int
If set, the trigger starts a run only after the specified amount of time has passed since the last time the trigger fired. The minimum allowed value is 60 seconds.
WaitAfterLastChangeSeconds int
If set, the trigger starts a run only after no file activity has occurred for the specified amount of time. This makes it possible to wait for a batch of incoming files to arrive before triggering a run. The minimum allowed value is 60 seconds.
url This property is required. String
URL to be monitored for file arrivals. The path must point to the root or a subpath of the external location. Please note that the URL must have a trailing slash character (/).
minTimeBetweenTriggersSeconds Integer
If set, the trigger starts a run only after the specified amount of time has passed since the last time the trigger fired. The minimum allowed value is 60 seconds.
waitAfterLastChangeSeconds Integer
If set, the trigger starts a run only after no file activity has occurred for the specified amount of time. This makes it possible to wait for a batch of incoming files to arrive before triggering a run. The minimum allowed value is 60 seconds.
url This property is required. string
URL to be monitored for file arrivals. The path must point to the root or a subpath of the external location. Please note that the URL must have a trailing slash character (/).
minTimeBetweenTriggersSeconds number
If set, the trigger starts a run only after the specified amount of time has passed since the last time the trigger fired. The minimum allowed value is 60 seconds.
waitAfterLastChangeSeconds number
If set, the trigger starts a run only after no file activity has occurred for the specified amount of time. This makes it possible to wait for a batch of incoming files to arrive before triggering a run. The minimum allowed value is 60 seconds.
url This property is required. str
URL to be monitored for file arrivals. The path must point to the root or a subpath of the external location. Please note that the URL must have a trailing slash character (/).
min_time_between_triggers_seconds int
If set, the trigger starts a run only after the specified amount of time has passed since the last time the trigger fired. The minimum allowed value is 60 seconds.
wait_after_last_change_seconds int
If set, the trigger starts a run only after no file activity has occurred for the specified amount of time. This makes it possible to wait for a batch of incoming files to arrive before triggering a run. The minimum allowed value is 60 seconds.
url This property is required. String
URL to be monitored for file arrivals. The path must point to the root or a subpath of the external location. Please note that the URL must have a trailing slash character (/).
minTimeBetweenTriggersSeconds Number
If set, the trigger starts a run only after the specified amount of time has passed since the last time the trigger fired. The minimum allowed value is 60 seconds.
waitAfterLastChangeSeconds Number
If set, the trigger starts a run only after no file activity has occurred for the specified amount of time. This makes it possible to wait for a batch of incoming files to arrive before triggering a run. The minimum allowed value is 60 seconds.
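
A hedged sketch of a file-arrival trigger, assuming a hypothetical external-location URL (note the required trailing slash) and notebook path:

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

// Sketch only: start a run when new files land under an external location.
// The monitored URL and notebook path are hypothetical placeholders.
const fileTriggeredJob = new databricks.Job("file_triggered_job", {
    name: "File arrival triggered job",
    trigger: {
        pauseStatus: "UNPAUSED",
        fileArrival: {
            url: "s3://example-bucket/landing/", // must end with a trailing slash
            waitAfterLastChangeSeconds: 120,     // wait for the batch of incoming files to settle
        },
    },
    tasks: [{
        taskKey: "process_new_files",
        notebookTask: {
            notebookPath: "/Shared/process-new-files", // hypothetical path
        },
    }],
});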

JobTriggerPeriodic
, JobTriggerPeriodicArgs

Interval This property is required. int
Specifies the interval at which the job should run. This value is required.
Unit This property is required. string
Options are {"DAYS", "HOURS", "WEEKS"}.
Interval This property is required. int
Specifies the interval at which the job should run. This value is required.
Unit This property is required. string
Options are {"DAYS", "HOURS", "WEEKS"}.
interval This property is required. Integer
Specifies the interval at which the job should run. This value is required.
unit This property is required. String
Options are {"DAYS", "HOURS", "WEEKS"}.
interval This property is required. number
Specifies the interval at which the job should run. This value is required.
unit This property is required. string
Options are {"DAYS", "HOURS", "WEEKS"}.
interval This property is required. int
Specifies the interval at which the job should run. This value is required.
unit This property is required. str
Options are {"DAYS", "HOURS", "WEEKS"}.
interval This property is required. Number
Specifies the interval at which the job should run. This value is required.
unit This property is required. String
Options are {"DAYS", "HOURS", "WEEKS"}.
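
A similarly hedged sketch of a periodic trigger using the interval and unit attributes above (the notebook path is a placeholder):

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

// Sketch only: run the job every 4 hours without defining a cron schedule.
const periodicJob = new databricks.Job("periodic_job", {
    name: "Periodic job",
    trigger: {
        periodic: {
            interval: 4,
            unit: "HOURS",
        },
    },
    tasks: [{
        taskKey: "refresh",
        notebookTask: {
            notebookPath: "/Shared/refresh", // hypothetical path
        },
    }],
});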

JobTriggerTable
, JobTriggerTableArgs

Condition string
MinTimeBetweenTriggersSeconds int
If set, the trigger starts a run only after the specified amount of time has passed since the last time the trigger fired. The minimum allowed value is 60 seconds.
TableNames List<string>
WaitAfterLastChangeSeconds int
If set, the trigger starts a run only after no file activity has occurred for the specified amount of time. This makes it possible to wait for a batch of incoming files to arrive before triggering a run. The minimum allowed value is 60 seconds.
Condition string
MinTimeBetweenTriggersSeconds int
If set, the trigger starts a run only after the specified amount of time has passed since the last time the trigger fired. The minimum allowed value is 60 seconds.
TableNames []string
WaitAfterLastChangeSeconds int
If set, the trigger starts a run only after no file activity has occurred for the specified amount of time. This makes it possible to wait for a batch of incoming files to arrive before triggering a run. The minimum allowed value is 60 seconds.
condition String
minTimeBetweenTriggersSeconds Integer
If set, the trigger starts a run only after the specified amount of time has passed since the last time the trigger fired. The minimum allowed value is 60 seconds.
tableNames List<String>
waitAfterLastChangeSeconds Integer
If set, the trigger starts a run only after no file activity has occurred for the specified amount of time. This makes it possible to wait for a batch of incoming files to arrive before triggering a run. The minimum allowed value is 60 seconds.
condition string
minTimeBetweenTriggersSeconds number
If set, the trigger starts a run only after the specified amount of time passed since the last time the trigger fired. The minimum allowed value is 60 seconds.
tableNames string[]
waitAfterLastChangeSeconds number
If set, the trigger starts a run only after no file activity has occurred for the specified amount of time. This makes it possible to wait for a batch of incoming files to arrive before triggering a run. The minimum allowed value is 60 seconds.
condition str
min_time_between_triggers_seconds int
If set, the trigger starts a run only after the specified amount of time passed since the last time the trigger fired. The minimum allowed value is 60 seconds.
table_names Sequence[str]
wait_after_last_change_seconds int
If set, the trigger starts a run only after no file activity has occurred for the specified amount of time. This makes it possible to wait for a batch of incoming files to arrive before triggering a run. The minimum allowed value is 60 seconds.
condition String
minTimeBetweenTriggersSeconds Number
If set, the trigger starts a run only after the specified amount of time passed since the last time the trigger fired. The minimum allowed value is 60 seconds.
tableNames List<String>
waitAfterLastChangeSeconds Number
If set, the trigger starts a run only after no file activity has occurred for the specified amount of time. This makes it possible to wait for a batch of incoming files to arrive before triggering a run. The minimum allowed value is 60 seconds.

JobTriggerTableUpdate, JobTriggerTableUpdateArgs

TableNames This property is required. List<string>
Condition string
MinTimeBetweenTriggersSeconds int
If set, the trigger starts a run only after the specified amount of time has passed since the last time the trigger fired. The minimum allowed value is 60 seconds.
WaitAfterLastChangeSeconds int
If set, the trigger starts a run only after no table updates have occurred for the specified amount of time. This makes it possible to wait for a series of table updates before triggering a run. The minimum allowed value is 60 seconds.
TableNames This property is required. []string
Condition string
MinTimeBetweenTriggersSeconds int
If set, the trigger starts a run only after the specified amount of time has passed since the last time the trigger fired. The minimum allowed value is 60 seconds.
WaitAfterLastChangeSeconds int
If set, the trigger starts a run only after no table updates have occurred for the specified amount of time. This makes it possible to wait for a series of table updates before triggering a run. The minimum allowed value is 60 seconds.
tableNames This property is required. List<String>
condition String
minTimeBetweenTriggersSeconds Integer
If set, the trigger starts a run only after the specified amount of time has passed since the last time the trigger fired. The minimum allowed value is 60 seconds.
waitAfterLastChangeSeconds Integer
If set, the trigger starts a run only after no table updates have occurred for the specified amount of time. This makes it possible to wait for a series of table updates before triggering a run. The minimum allowed value is 60 seconds.
tableNames This property is required. string[]
condition string
minTimeBetweenTriggersSeconds number
If set, the trigger starts a run only after the specified amount of time has passed since the last time the trigger fired. The minimum allowed value is 60 seconds.
waitAfterLastChangeSeconds number
If set, the trigger starts a run only after no table updates have occurred for the specified amount of time. This makes it possible to wait for a series of table updates before triggering a run. The minimum allowed value is 60 seconds.
table_names This property is required. Sequence[str]
condition str
min_time_between_triggers_seconds int
If set, the trigger starts a run only after the specified amount of time has passed since the last time the trigger fired. The minimum allowed value is 60 seconds.
wait_after_last_change_seconds int
If set, the trigger starts a run only after no table updates have occurred for the specified amount of time. This makes it possible to wait for a series of table updates before triggering a run. The minimum allowed value is 60 seconds.
tableNames This property is required. List<String>
condition String
minTimeBetweenTriggersSeconds Number
If set, the trigger starts a run only after the specified amount of time has passed since the last time the trigger fired. The minimum allowed value is 60 seconds.
waitAfterLastChangeSeconds Number
If set, the trigger starts a run only after no table updates have occurred for the specified amount of time. This makes it possible to wait for a series of table updates before triggering a run. The minimum allowed value is 60 seconds.
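
Both the table and tableUpdate blocks above are set inside the job-level trigger block. A minimal sketch of a tableUpdate trigger, assuming a hypothetical cluster id, notebook path, and table name (the condition value shown is also an assumption):

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

// Hypothetical job that runs after updates to a Unity Catalog table have settled.
const tableTriggered = new databricks.Job("table-triggered", {
    name: "Table update triggered job",
    tasks: [{
        taskKey: "aggregate",
        existingClusterId: "1234-567890-abcde123", // placeholder cluster id
        notebookTask: {
            notebookPath: "/Shared/aggregate", // placeholder notebook path
        },
    }],
    trigger: {
        tableUpdate: {
            tableNames: ["main.bronze.events"], // placeholder fully qualified table name
            condition: "ALL_UPDATED", // assumed condition value
            minTimeBetweenTriggersSeconds: 60,
            waitAfterLastChangeSeconds: 120,
        },
    },
});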

JobWebhookNotifications, JobWebhookNotificationsArgs

OnDurationWarningThresholdExceededs List<JobWebhookNotificationsOnDurationWarningThresholdExceeded>

(List) list of notification IDs to call when the duration of a run exceeds the threshold specified by the RUN_DURATION_SECONDS metric in the health block.

Note that the id is not to be confused with the name of the alert destination. The id can be retrieved through the API or from the Databricks UI URL: https://<workspace host>/sql/destinations/<notification id>?o=<workspace id>

See the configuration sketch after these property listings for an example.

OnFailures List<JobWebhookNotificationsOnFailure>
(List) list of notification IDs to call when the run fails. A maximum of 3 destinations can be specified.
OnStarts List<JobWebhookNotificationsOnStart>
(List) list of notification IDs to call when the run starts. A maximum of 3 destinations can be specified.
OnStreamingBacklogExceededs List<JobWebhookNotificationsOnStreamingBacklogExceeded>
(List) list of notification IDs to call when streaming backlog thresholds are exceeded.
OnSuccesses List<JobWebhookNotificationsOnSuccess>
(List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified.
OnDurationWarningThresholdExceededs []JobWebhookNotificationsOnDurationWarningThresholdExceeded

(List) list of notification IDs to call when the duration of a run exceeds the threshold specified by the RUN_DURATION_SECONDS metric in the health block.

Note that the id is not to be confused with the name of the alert destination. The id can be retrieved through the API or from the Databricks UI URL: https://<workspace host>/sql/destinations/<notification id>?o=<workspace id>

See the configuration sketch after these property listings for an example.

OnFailures []JobWebhookNotificationsOnFailure
(List) list of notification IDs to call when the run fails. A maximum of 3 destinations can be specified.
OnStarts []JobWebhookNotificationsOnStart
(List) list of notification IDs to call when the run starts. A maximum of 3 destinations can be specified.
OnStreamingBacklogExceededs []JobWebhookNotificationsOnStreamingBacklogExceeded
(List) list of notification IDs to call when streaming backlog thresholds are exceeded.
OnSuccesses []JobWebhookNotificationsOnSuccess
(List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified.
onDurationWarningThresholdExceededs List<JobWebhookNotificationsOnDurationWarningThresholdExceeded>

(List) list of notification IDs to call when the duration of a run exceeds the threshold specified by the RUN_DURATION_SECONDS metric in the health block.

Note that the id is not to be confused with the name of the alert destination. The id can be retrieved through the API or from the Databricks UI URL: https://<workspace host>/sql/destinations/<notification id>?o=<workspace id>

See the configuration sketch after these property listings for an example.

onFailures List<JobWebhookNotificationsOnFailure>
(List) list of notification IDs to call when the run fails. A maximum of 3 destinations can be specified.
onStarts List<JobWebhookNotificationsOnStart>
(List) list of notification IDs to call when the run starts. A maximum of 3 destinations can be specified.
onStreamingBacklogExceededs List<JobWebhookNotificationsOnStreamingBacklogExceeded>
(List) list of notification IDs to call when streaming backlog thresholds are exceeded.
onSuccesses List<JobWebhookNotificationsOnSuccess>
(List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified.
onDurationWarningThresholdExceededs JobWebhookNotificationsOnDurationWarningThresholdExceeded[]

(List) list of notification IDs to call when the duration of a run exceeds the threshold specified by the RUN_DURATION_SECONDS metric in the health block.

Note that the id is not to be confused with the name of the alert destination. The id can be retrieved through the API or from the Databricks UI URL: https://<workspace host>/sql/destinations/<notification id>?o=<workspace id>

See the configuration sketch after these property listings for an example.

onFailures JobWebhookNotificationsOnFailure[]
(List) list of notification IDs to call when the run fails. A maximum of 3 destinations can be specified.
onStarts JobWebhookNotificationsOnStart[]
(List) list of notification IDs to call when the run starts. A maximum of 3 destinations can be specified.
onStreamingBacklogExceededs JobWebhookNotificationsOnStreamingBacklogExceeded[]
(List) list of notification IDs to call when streaming backlog thresholds are exceeded.
onSuccesses JobWebhookNotificationsOnSuccess[]
(List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified.
on_duration_warning_threshold_exceededs Sequence[JobWebhookNotificationsOnDurationWarningThresholdExceeded]

(List) list of notification IDs to call when the duration of a run exceeds the threshold specified by the RUN_DURATION_SECONDS metric in the health block.

Note that the id is not to be confused with the name of the alert destination. The id can be retrieved through the API or from the Databricks UI URL: https://<workspace host>/sql/destinations/<notification id>?o=<workspace id>

See the configuration sketch after these property listings for an example.

on_failures Sequence[JobWebhookNotificationsOnFailure]
(List) list of notification IDs to call when the run fails. A maximum of 3 destinations can be specified.
on_starts Sequence[JobWebhookNotificationsOnStart]
(List) list of notification IDs to call when the run starts. A maximum of 3 destinations can be specified.
on_streaming_backlog_exceededs Sequence[JobWebhookNotificationsOnStreamingBacklogExceeded]
(List) list of notification IDs to call when streaming backlog thresholds are exceeded.
on_successes Sequence[JobWebhookNotificationsOnSuccess]
(List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified.
onDurationWarningThresholdExceededs List<Property Map>

(List) list of notification IDs to call when the duration of a run exceeds the threshold specified by the RUN_DURATION_SECONDS metric in the health block.

Note that the id is not to be confused with the name of the alert destination. The id can be retrieved through the API or from the Databricks UI URL: https://<workspace host>/sql/destinations/<notification id>?o=<workspace id>

See the configuration sketch after these property listings for an example.

onFailures List<Property Map>
(List) list of notification IDs to call when the run fails. A maximum of 3 destinations can be specified.
onStarts List<Property Map>
(List) list of notification IDs to call when the run starts. A maximum of 3 destinations can be specified.
onStreamingBacklogExceededs List<Property Map>
(List) list of notification IDs to call when streaming backlog thresholds are exceeded.
onSuccesses List<Property Map>
(List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified.
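
The webhook notification lists above reference notification destinations that already exist in the workspace. A minimal sketch, assuming a hypothetical cluster id and notebook path; the UUIDs below are placeholders for destination ids taken from the Databricks UI or API:

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

// Hypothetical job wiring webhook notifications to pre-existing notification destinations.
const notified = new databricks.Job("notified", {
    name: "Job with webhook notifications",
    tasks: [{
        taskKey: "main",
        existingClusterId: "1234-567890-abcde123", // placeholder cluster id
        notebookTask: {
            notebookPath: "/Shared/main", // placeholder notebook path
        },
    }],
    webhookNotifications: {
        onStarts: [{ id: "00000000-0000-0000-0000-000000000001" }],    // placeholder destination id
        onFailures: [{ id: "00000000-0000-0000-0000-000000000002" }],  // placeholder destination id
        onSuccesses: [{ id: "00000000-0000-0000-0000-000000000003" }], // placeholder destination id
    },
});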

JobWebhookNotificationsOnDurationWarningThresholdExceeded, JobWebhookNotificationsOnDurationWarningThresholdExceededArgs

Id This property is required. string
ID of the notification destination.
Id This property is required. string
ID of the notification destination.
id This property is required. String
ID of the notification destination.
id This property is required. string
ID of the notification destination.
id This property is required. str
ID of the notification destination.
id This property is required. String
ID of the notification destination.

JobWebhookNotificationsOnFailure, JobWebhookNotificationsOnFailureArgs

Id This property is required. string
ID of the notification destination.
Id This property is required. string
ID of the notification destination.
id This property is required. String
ID of the notification destination.
id This property is required. string
ID of the notification destination.
id This property is required. str
ID of the notification destination.
id This property is required. String
ID of the notification destination.

JobWebhookNotificationsOnStart, JobWebhookNotificationsOnStartArgs

Id This property is required. string
ID of the notification destination.
Id This property is required. string
ID of the notification destination.
id This property is required. String
ID of the notification destination.
id This property is required. string
ID of the notification destination.
id This property is required. str
ID of the notification destination.
id This property is required. String
ID of the notification destination.

JobWebhookNotificationsOnStreamingBacklogExceeded, JobWebhookNotificationsOnStreamingBacklogExceededArgs

Id This property is required. string
ID of the notification destination.
Id This property is required. string
ID of the notification destination.
id This property is required. String
ID of the notification destination.
id This property is required. string
ID of the notification destination.
id This property is required. str
ID of the notification destination.
id This property is required. String
ID of the notification destination.

JobWebhookNotificationsOnSuccess, JobWebhookNotificationsOnSuccessArgs

Id This property is required. string
ID of the notification destination.
Id This property is required. string
ID of the notification destination.
id This property is required. String
ID of the notification destination.
id This property is required. string
ID of the notification destination.
id This property is required. str
ID of the notification destination.
id This property is required. String
ID of the notification destination.

Import

The job resource can be imported using the id of the job:

bash

$ pulumi import databricks:index/job:Job this <job-id>

To learn more about importing existing cloud resources, see Importing resources.

Package Details

Repository
databricks pulumi/pulumi-databricks
License
Apache-2.0
Notes
This Pulumi package is based on the databricks Terraform Provider.