gcp.dataproc.WorkflowTemplate
A Workflow Template is a reusable workflow configuration. It defines a graph of jobs together with information on where to run those jobs. Creating the template does not run the workflow; the jobs run each time the template is instantiated (for example through the Dataproc API or the gcloud CLI).
Example Usage
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const template = new gcp.dataproc.WorkflowTemplate("template", {
name: "template-example",
location: "us-central1",
placement: {
managedCluster: {
clusterName: "my-cluster",
config: {
gceClusterConfig: {
zone: "us-central1-a",
tags: [
"foo",
"bar",
],
},
masterConfig: {
numInstances: 1,
machineType: "n1-standard-1",
diskConfig: {
bootDiskType: "pd-ssd",
bootDiskSizeGb: 15,
},
},
workerConfig: {
numInstances: 3,
machineType: "n1-standard-2",
diskConfig: {
bootDiskSizeGb: 10,
numLocalSsds: 2,
},
},
secondaryWorkerConfig: {
numInstances: 2,
},
softwareConfig: {
imageVersion: "2.0.35-debian10",
},
},
},
},
jobs: [
{
stepId: "someJob",
sparkJob: {
mainClass: "SomeClass",
},
},
{
stepId: "otherJob",
prerequisiteStepIds: ["someJob"],
prestoJob: {
queryFileUri: "someuri",
},
},
],
});
import pulumi
import pulumi_gcp as gcp
template = gcp.dataproc.WorkflowTemplate("template",
name="template-example",
location="us-central1",
placement={
"managed_cluster": {
"cluster_name": "my-cluster",
"config": {
"gce_cluster_config": {
"zone": "us-central1-a",
"tags": [
"foo",
"bar",
],
},
"master_config": {
"num_instances": 1,
"machine_type": "n1-standard-1",
"disk_config": {
"boot_disk_type": "pd-ssd",
"boot_disk_size_gb": 15,
},
},
"worker_config": {
"num_instances": 3,
"machine_type": "n1-standard-2",
"disk_config": {
"boot_disk_size_gb": 10,
"num_local_ssds": 2,
},
},
"secondary_worker_config": {
"num_instances": 2,
},
"software_config": {
"image_version": "2.0.35-debian10",
},
},
},
},
jobs=[
{
"step_id": "someJob",
"spark_job": {
"main_class": "SomeClass",
},
},
{
"step_id": "otherJob",
"prerequisite_step_ids": ["someJob"],
"presto_job": {
"query_file_uri": "someuri",
},
},
])
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := dataproc.NewWorkflowTemplate(ctx, "template", &dataproc.WorkflowTemplateArgs{
Name: pulumi.String("template-example"),
Location: pulumi.String("us-central1"),
Placement: &dataproc.WorkflowTemplatePlacementArgs{
ManagedCluster: &dataproc.WorkflowTemplatePlacementManagedClusterArgs{
ClusterName: pulumi.String("my-cluster"),
Config: &dataproc.WorkflowTemplatePlacementManagedClusterConfigArgs{
GceClusterConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigArgs{
Zone: pulumi.String("us-central1-a"),
Tags: pulumi.StringArray{
pulumi.String("foo"),
pulumi.String("bar"),
},
},
MasterConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigMasterConfigArgs{
NumInstances: pulumi.Int(1),
MachineType: pulumi.String("n1-standard-1"),
DiskConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigArgs{
BootDiskType: pulumi.String("pd-ssd"),
BootDiskSizeGb: pulumi.Int(15),
},
},
WorkerConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigWorkerConfigArgs{
NumInstances: pulumi.Int(3),
MachineType: pulumi.String("n1-standard-2"),
DiskConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigArgs{
BootDiskSizeGb: pulumi.Int(10),
NumLocalSsds: pulumi.Int(2),
},
},
SecondaryWorkerConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigArgs{
NumInstances: pulumi.Int(2),
},
SoftwareConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigArgs{
ImageVersion: pulumi.String("2.0.35-debian10"),
},
},
},
},
Jobs: dataproc.WorkflowTemplateJobArray{
&dataproc.WorkflowTemplateJobArgs{
StepId: pulumi.String("someJob"),
SparkJob: &dataproc.WorkflowTemplateJobSparkJobArgs{
MainClass: pulumi.String("SomeClass"),
},
},
&dataproc.WorkflowTemplateJobArgs{
StepId: pulumi.String("otherJob"),
PrerequisiteStepIds: pulumi.StringArray{
pulumi.String("someJob"),
},
PrestoJob: &dataproc.WorkflowTemplateJobPrestoJobArgs{
QueryFileUri: pulumi.String("someuri"),
},
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
var template = new Gcp.Dataproc.WorkflowTemplate("template", new()
{
Name = "template-example",
Location = "us-central1",
Placement = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementArgs
{
ManagedCluster = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterArgs
{
ClusterName = "my-cluster",
Config = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigArgs
{
GceClusterConfig = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigArgs
{
Zone = "us-central1-a",
Tags = new[]
{
"foo",
"bar",
},
},
MasterConfig = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigMasterConfigArgs
{
NumInstances = 1,
MachineType = "n1-standard-1",
DiskConfig = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigArgs
{
BootDiskType = "pd-ssd",
BootDiskSizeGb = 15,
},
},
WorkerConfig = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigWorkerConfigArgs
{
NumInstances = 3,
MachineType = "n1-standard-2",
DiskConfig = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigArgs
{
BootDiskSizeGb = 10,
NumLocalSsds = 2,
},
},
SecondaryWorkerConfig = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigArgs
{
NumInstances = 2,
},
SoftwareConfig = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigArgs
{
ImageVersion = "2.0.35-debian10",
},
},
},
},
Jobs = new[]
{
new Gcp.Dataproc.Inputs.WorkflowTemplateJobArgs
{
StepId = "someJob",
SparkJob = new Gcp.Dataproc.Inputs.WorkflowTemplateJobSparkJobArgs
{
MainClass = "SomeClass",
},
},
new Gcp.Dataproc.Inputs.WorkflowTemplateJobArgs
{
StepId = "otherJob",
PrerequisiteStepIds = new[]
{
"someJob",
},
PrestoJob = new Gcp.Dataproc.Inputs.WorkflowTemplateJobPrestoJobArgs
{
QueryFileUri = "someuri",
},
},
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.dataproc.WorkflowTemplate;
import com.pulumi.gcp.dataproc.WorkflowTemplateArgs;
import com.pulumi.gcp.dataproc.inputs.WorkflowTemplatePlacementArgs;
import com.pulumi.gcp.dataproc.inputs.WorkflowTemplatePlacementManagedClusterArgs;
import com.pulumi.gcp.dataproc.inputs.WorkflowTemplatePlacementManagedClusterConfigArgs;
import com.pulumi.gcp.dataproc.inputs.WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigArgs;
import com.pulumi.gcp.dataproc.inputs.WorkflowTemplatePlacementManagedClusterConfigMasterConfigArgs;
import com.pulumi.gcp.dataproc.inputs.WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigArgs;
import com.pulumi.gcp.dataproc.inputs.WorkflowTemplatePlacementManagedClusterConfigWorkerConfigArgs;
import com.pulumi.gcp.dataproc.inputs.WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigArgs;
import com.pulumi.gcp.dataproc.inputs.WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigArgs;
import com.pulumi.gcp.dataproc.inputs.WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigArgs;
import com.pulumi.gcp.dataproc.inputs.WorkflowTemplateJobArgs;
import com.pulumi.gcp.dataproc.inputs.WorkflowTemplateJobSparkJobArgs;
import com.pulumi.gcp.dataproc.inputs.WorkflowTemplateJobPrestoJobArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var template = new WorkflowTemplate("template", WorkflowTemplateArgs.builder()
.name("template-example")
.location("us-central1")
.placement(WorkflowTemplatePlacementArgs.builder()
.managedCluster(WorkflowTemplatePlacementManagedClusterArgs.builder()
.clusterName("my-cluster")
.config(WorkflowTemplatePlacementManagedClusterConfigArgs.builder()
.gceClusterConfig(WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigArgs.builder()
.zone("us-central1-a")
.tags(
"foo",
"bar")
.build())
.masterConfig(WorkflowTemplatePlacementManagedClusterConfigMasterConfigArgs.builder()
.numInstances(1)
.machineType("n1-standard-1")
.diskConfig(WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigArgs.builder()
.bootDiskType("pd-ssd")
.bootDiskSizeGb(15)
.build())
.build())
.workerConfig(WorkflowTemplatePlacementManagedClusterConfigWorkerConfigArgs.builder()
.numInstances(3)
.machineType("n1-standard-2")
.diskConfig(WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigArgs.builder()
.bootDiskSizeGb(10)
.numLocalSsds(2)
.build())
.build())
.secondaryWorkerConfig(WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigArgs.builder()
.numInstances(2)
.build())
.softwareConfig(WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigArgs.builder()
.imageVersion("2.0.35-debian10")
.build())
.build())
.build())
.build())
.jobs(
WorkflowTemplateJobArgs.builder()
.stepId("someJob")
.sparkJob(WorkflowTemplateJobSparkJobArgs.builder()
.mainClass("SomeClass")
.build())
.build(),
WorkflowTemplateJobArgs.builder()
.stepId("otherJob")
.prerequisiteStepIds("someJob")
.prestoJob(WorkflowTemplateJobPrestoJobArgs.builder()
.queryFileUri("someuri")
.build())
.build())
.build());
}
}
resources:
template:
type: gcp:dataproc:WorkflowTemplate
properties:
name: template-example
location: us-central1
placement:
managedCluster:
clusterName: my-cluster
config:
gceClusterConfig:
zone: us-central1-a
tags:
- foo
- bar
masterConfig:
numInstances: 1
machineType: n1-standard-1
diskConfig:
bootDiskType: pd-ssd
bootDiskSizeGb: 15
workerConfig:
numInstances: 3
machineType: n1-standard-2
diskConfig:
bootDiskSizeGb: 10
numLocalSsds: 2
secondaryWorkerConfig:
numInstances: 2
softwareConfig:
imageVersion: 2.0.35-debian10
jobs:
- stepId: someJob
sparkJob:
mainClass: SomeClass
- stepId: otherJob
prerequisiteStepIds:
- someJob
prestoJob:
queryFileUri: someuri
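The managed cluster in the example above is provisioned for each workflow run and torn down when the run completes. If the jobs should instead run on an existing, long-lived cluster, placement also accepts a clusterSelector that matches a cluster by labels (the full shape appears in the constructor reference below). A minimal TypeScript sketch, assuming a Dataproc cluster in the region already carries a hypothetical env: staging label:
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
// Run the same job graph on an existing cluster selected by label,
// instead of provisioning a managed cluster per run.
const selectorTemplate = new gcp.dataproc.WorkflowTemplate("selector-template", {
    name: "selector-template-example",
    location: "us-central1",
    placement: {
        clusterSelector: {
            // Hypothetical label; the workflow runs on a cluster whose labels match.
            clusterLabels: {
                env: "staging",
            },
        },
    },
    jobs: [{
        stepId: "someJob",
        sparkJob: {
            mainClass: "SomeClass",
        },
    }],
});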
Create WorkflowTemplate Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new WorkflowTemplate(name: string, args: WorkflowTemplateArgs, opts?: CustomResourceOptions);
@overload
def WorkflowTemplate(resource_name: str,
args: WorkflowTemplateArgs,
opts: Optional[ResourceOptions] = None)
@overload
def WorkflowTemplate(resource_name: str,
opts: Optional[ResourceOptions] = None,
jobs: Optional[Sequence[WorkflowTemplateJobArgs]] = None,
location: Optional[str] = None,
placement: Optional[WorkflowTemplatePlacementArgs] = None,
dag_timeout: Optional[str] = None,
encryption_config: Optional[WorkflowTemplateEncryptionConfigArgs] = None,
labels: Optional[Mapping[str, str]] = None,
name: Optional[str] = None,
parameters: Optional[Sequence[WorkflowTemplateParameterArgs]] = None,
project: Optional[str] = None,
version: Optional[int] = None)
func NewWorkflowTemplate(ctx *Context, name string, args WorkflowTemplateArgs, opts ...ResourceOption) (*WorkflowTemplate, error)
public WorkflowTemplate(string name, WorkflowTemplateArgs args, CustomResourceOptions? opts = null)
public WorkflowTemplate(String name, WorkflowTemplateArgs args)
public WorkflowTemplate(String name, WorkflowTemplateArgs args, CustomResourceOptions options)
type: gcp:dataproc:WorkflowTemplate
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name (string, required) - The unique name of the resource.
- args (WorkflowTemplateArgs, required) - The arguments to resource properties.
- opts (CustomResourceOptions) - Bag of options to control the resource's behavior (see the sketch after this list).
- resource_name (str, required) - The unique name of the resource.
- args (WorkflowTemplateArgs, required) - The arguments to resource properties.
- opts (ResourceOptions) - Bag of options to control the resource's behavior.
- ctx (Context) - Context object for the current deployment.
- name (string, required) - The unique name of the resource.
- args (WorkflowTemplateArgs, required) - The arguments to resource properties.
- opts (ResourceOption) - Bag of options to control the resource's behavior.
- name (string, required) - The unique name of the resource.
- args (WorkflowTemplateArgs, required) - The arguments to resource properties.
- opts (CustomResourceOptions) - Bag of options to control the resource's behavior.
- name (String, required) - The unique name of the resource.
- args (WorkflowTemplateArgs, required) - The arguments to resource properties.
- options (CustomResourceOptions) - Bag of options to control the resource's behavior.
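The opts (or options) bag passes Pulumi resource options such as explicit dependencies or deletion protection; it controls how Pulumi manages the resource rather than the resource's own properties. A minimal TypeScript sketch, assuming a hypothetical network resource the template should be created after:
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
// A hypothetical resource the template should wait on.
const network = new gcp.compute.Network("workflow-net", {autoCreateSubnetworks: true});
const guardedTemplate = new gcp.dataproc.WorkflowTemplate("guarded-template", {
    name: "guarded-template-example",
    location: "us-central1",
    placement: {
        managedCluster: {
            clusterName: "my-cluster",
            config: {
                gceClusterConfig: {
                    zone: "us-central1-a",
                },
            },
        },
    },
    jobs: [{
        stepId: "someJob",
        sparkJob: {
            mainClass: "SomeClass",
        },
    }],
}, {
    protect: true,        // refuse to delete the template until it is unprotected
    dependsOn: [network], // create the network before the template
});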
Constructor example
The following reference example uses placeholder values for all input properties.
var workflowTemplateResource = new Gcp.Dataproc.WorkflowTemplate("workflowTemplateResource", new()
{
Jobs = new[]
{
new Gcp.Dataproc.Inputs.WorkflowTemplateJobArgs
{
StepId = "string",
HadoopJob = new Gcp.Dataproc.Inputs.WorkflowTemplateJobHadoopJobArgs
{
ArchiveUris = new[]
{
"string",
},
Args = new[]
{
"string",
},
FileUris = new[]
{
"string",
},
JarFileUris = new[]
{
"string",
},
LoggingConfig = new Gcp.Dataproc.Inputs.WorkflowTemplateJobHadoopJobLoggingConfigArgs
{
DriverLogLevels =
{
{ "string", "string" },
},
},
MainClass = "string",
MainJarFileUri = "string",
Properties =
{
{ "string", "string" },
},
},
HiveJob = new Gcp.Dataproc.Inputs.WorkflowTemplateJobHiveJobArgs
{
ContinueOnFailure = false,
JarFileUris = new[]
{
"string",
},
Properties =
{
{ "string", "string" },
},
QueryFileUri = "string",
QueryList = new Gcp.Dataproc.Inputs.WorkflowTemplateJobHiveJobQueryListArgs
{
Queries = new[]
{
"string",
},
},
ScriptVariables =
{
{ "string", "string" },
},
},
Labels =
{
{ "string", "string" },
},
PigJob = new Gcp.Dataproc.Inputs.WorkflowTemplateJobPigJobArgs
{
ContinueOnFailure = false,
JarFileUris = new[]
{
"string",
},
LoggingConfig = new Gcp.Dataproc.Inputs.WorkflowTemplateJobPigJobLoggingConfigArgs
{
DriverLogLevels =
{
{ "string", "string" },
},
},
Properties =
{
{ "string", "string" },
},
QueryFileUri = "string",
QueryList = new Gcp.Dataproc.Inputs.WorkflowTemplateJobPigJobQueryListArgs
{
Queries = new[]
{
"string",
},
},
ScriptVariables =
{
{ "string", "string" },
},
},
PrerequisiteStepIds = new[]
{
"string",
},
PrestoJob = new Gcp.Dataproc.Inputs.WorkflowTemplateJobPrestoJobArgs
{
ClientTags = new[]
{
"string",
},
ContinueOnFailure = false,
LoggingConfig = new Gcp.Dataproc.Inputs.WorkflowTemplateJobPrestoJobLoggingConfigArgs
{
DriverLogLevels =
{
{ "string", "string" },
},
},
OutputFormat = "string",
Properties =
{
{ "string", "string" },
},
QueryFileUri = "string",
QueryList = new Gcp.Dataproc.Inputs.WorkflowTemplateJobPrestoJobQueryListArgs
{
Queries = new[]
{
"string",
},
},
},
PysparkJob = new Gcp.Dataproc.Inputs.WorkflowTemplateJobPysparkJobArgs
{
MainPythonFileUri = "string",
ArchiveUris = new[]
{
"string",
},
Args = new[]
{
"string",
},
FileUris = new[]
{
"string",
},
JarFileUris = new[]
{
"string",
},
LoggingConfig = new Gcp.Dataproc.Inputs.WorkflowTemplateJobPysparkJobLoggingConfigArgs
{
DriverLogLevels =
{
{ "string", "string" },
},
},
Properties =
{
{ "string", "string" },
},
PythonFileUris = new[]
{
"string",
},
},
Scheduling = new Gcp.Dataproc.Inputs.WorkflowTemplateJobSchedulingArgs
{
MaxFailuresPerHour = 0,
MaxFailuresTotal = 0,
},
SparkJob = new Gcp.Dataproc.Inputs.WorkflowTemplateJobSparkJobArgs
{
ArchiveUris = new[]
{
"string",
},
Args = new[]
{
"string",
},
FileUris = new[]
{
"string",
},
JarFileUris = new[]
{
"string",
},
LoggingConfig = new Gcp.Dataproc.Inputs.WorkflowTemplateJobSparkJobLoggingConfigArgs
{
DriverLogLevels =
{
{ "string", "string" },
},
},
MainClass = "string",
MainJarFileUri = "string",
Properties =
{
{ "string", "string" },
},
},
SparkRJob = new Gcp.Dataproc.Inputs.WorkflowTemplateJobSparkRJobArgs
{
MainRFileUri = "string",
ArchiveUris = new[]
{
"string",
},
Args = new[]
{
"string",
},
FileUris = new[]
{
"string",
},
LoggingConfig = new Gcp.Dataproc.Inputs.WorkflowTemplateJobSparkRJobLoggingConfigArgs
{
DriverLogLevels =
{
{ "string", "string" },
},
},
Properties =
{
{ "string", "string" },
},
},
SparkSqlJob = new Gcp.Dataproc.Inputs.WorkflowTemplateJobSparkSqlJobArgs
{
JarFileUris = new[]
{
"string",
},
LoggingConfig = new Gcp.Dataproc.Inputs.WorkflowTemplateJobSparkSqlJobLoggingConfigArgs
{
DriverLogLevels =
{
{ "string", "string" },
},
},
Properties =
{
{ "string", "string" },
},
QueryFileUri = "string",
QueryList = new Gcp.Dataproc.Inputs.WorkflowTemplateJobSparkSqlJobQueryListArgs
{
Queries = new[]
{
"string",
},
},
ScriptVariables =
{
{ "string", "string" },
},
},
},
},
Location = "string",
Placement = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementArgs
{
ClusterSelector = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementClusterSelectorArgs
{
ClusterLabels =
{
{ "string", "string" },
},
Zone = "string",
},
ManagedCluster = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterArgs
{
ClusterName = "string",
Config = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigArgs
{
AutoscalingConfig = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigArgs
{
Policy = "string",
},
EncryptionConfig = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigArgs
{
GcePdKmsKeyName = "string",
},
EndpointConfig = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigEndpointConfigArgs
{
EnableHttpPortAccess = false,
HttpPorts =
{
{ "string", "string" },
},
},
GceClusterConfig = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigArgs
{
InternalIpOnly = false,
Metadata =
{
{ "string", "string" },
},
Network = "string",
NodeGroupAffinity = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityArgs
{
NodeGroup = "string",
},
PrivateIpv6GoogleAccess = "string",
ReservationAffinity = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityArgs
{
ConsumeReservationType = "string",
Key = "string",
Values = new[]
{
"string",
},
},
ServiceAccount = "string",
ServiceAccountScopes = new[]
{
"string",
},
ShieldedInstanceConfig = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfigArgs
{
EnableIntegrityMonitoring = false,
EnableSecureBoot = false,
EnableVtpm = false,
},
Subnetwork = "string",
Tags = new[]
{
"string",
},
Zone = "string",
},
GkeClusterConfig = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigArgs
{
NamespacedGkeDeploymentTarget = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetArgs
{
ClusterNamespace = "string",
TargetGkeCluster = "string",
},
},
InitializationActions = new[]
{
new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigInitializationActionArgs
{
ExecutableFile = "string",
ExecutionTimeout = "string",
},
},
LifecycleConfig = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigArgs
{
AutoDeleteTime = "string",
AutoDeleteTtl = "string",
IdleDeleteTtl = "string",
IdleStartTime = "string",
},
MasterConfig = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigMasterConfigArgs
{
Accelerators = new[]
{
new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorArgs
{
AcceleratorCount = 0,
AcceleratorType = "string",
},
},
DiskConfig = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigArgs
{
BootDiskSizeGb = 0,
BootDiskType = "string",
NumLocalSsds = 0,
},
Image = "string",
InstanceNames = new[]
{
"string",
},
IsPreemptible = false,
MachineType = "string",
ManagedGroupConfigs = new[]
{
new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigArgs
{
InstanceGroupManagerName = "string",
InstanceTemplateName = "string",
},
},
MinCpuPlatform = "string",
NumInstances = 0,
Preemptibility = "string",
},
MetastoreConfig = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigArgs
{
DataprocMetastoreService = "string",
},
SecondaryWorkerConfig = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigArgs
{
Accelerators = new[]
{
new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorArgs
{
AcceleratorCount = 0,
AcceleratorType = "string",
},
},
DiskConfig = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigArgs
{
BootDiskSizeGb = 0,
BootDiskType = "string",
NumLocalSsds = 0,
},
Image = "string",
InstanceNames = new[]
{
"string",
},
IsPreemptible = false,
MachineType = "string",
ManagedGroupConfigs = new[]
{
new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigArgs
{
InstanceGroupManagerName = "string",
InstanceTemplateName = "string",
},
},
MinCpuPlatform = "string",
NumInstances = 0,
Preemptibility = "string",
},
SecurityConfig = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigSecurityConfigArgs
{
KerberosConfig = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigArgs
{
CrossRealmTrustAdminServer = "string",
CrossRealmTrustKdc = "string",
CrossRealmTrustRealm = "string",
CrossRealmTrustSharedPassword = "string",
EnableKerberos = false,
KdcDbKey = "string",
KeyPassword = "string",
Keystore = "string",
KeystorePassword = "string",
KmsKey = "string",
Realm = "string",
RootPrincipalPassword = "string",
TgtLifetimeHours = 0,
Truststore = "string",
TruststorePassword = "string",
},
},
SoftwareConfig = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigArgs
{
ImageVersion = "string",
OptionalComponents = new[]
{
"string",
},
Properties =
{
{ "string", "string" },
},
},
StagingBucket = "string",
TempBucket = "string",
WorkerConfig = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigWorkerConfigArgs
{
Accelerators = new[]
{
new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorArgs
{
AcceleratorCount = 0,
AcceleratorType = "string",
},
},
DiskConfig = new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigArgs
{
BootDiskSizeGb = 0,
BootDiskType = "string",
NumLocalSsds = 0,
},
Image = "string",
InstanceNames = new[]
{
"string",
},
IsPreemptible = false,
MachineType = "string",
ManagedGroupConfigs = new[]
{
new Gcp.Dataproc.Inputs.WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigArgs
{
InstanceGroupManagerName = "string",
InstanceTemplateName = "string",
},
},
MinCpuPlatform = "string",
NumInstances = 0,
Preemptibility = "string",
},
},
Labels =
{
{ "string", "string" },
},
},
},
DagTimeout = "string",
EncryptionConfig = new Gcp.Dataproc.Inputs.WorkflowTemplateEncryptionConfigArgs
{
KmsKey = "string",
},
Labels =
{
{ "string", "string" },
},
Name = "string",
Parameters = new[]
{
new Gcp.Dataproc.Inputs.WorkflowTemplateParameterArgs
{
Fields = new[]
{
"string",
},
Name = "string",
Description = "string",
Validation = new Gcp.Dataproc.Inputs.WorkflowTemplateParameterValidationArgs
{
Regex = new Gcp.Dataproc.Inputs.WorkflowTemplateParameterValidationRegexArgs
{
Regexes = new[]
{
"string",
},
},
Values = new Gcp.Dataproc.Inputs.WorkflowTemplateParameterValidationValuesArgs
{
Values = new[]
{
"string",
},
},
},
},
},
Project = "string",
});
example, err := dataproc.NewWorkflowTemplate(ctx, "workflowTemplateResource", &dataproc.WorkflowTemplateArgs{
Jobs: dataproc.WorkflowTemplateJobArray{
&dataproc.WorkflowTemplateJobArgs{
StepId: pulumi.String("string"),
HadoopJob: &dataproc.WorkflowTemplateJobHadoopJobArgs{
ArchiveUris: pulumi.StringArray{
pulumi.String("string"),
},
Args: pulumi.StringArray{
pulumi.String("string"),
},
FileUris: pulumi.StringArray{
pulumi.String("string"),
},
JarFileUris: pulumi.StringArray{
pulumi.String("string"),
},
LoggingConfig: &dataproc.WorkflowTemplateJobHadoopJobLoggingConfigArgs{
DriverLogLevels: pulumi.StringMap{
"string": pulumi.String("string"),
},
},
MainClass: pulumi.String("string"),
MainJarFileUri: pulumi.String("string"),
Properties: pulumi.StringMap{
"string": pulumi.String("string"),
},
},
HiveJob: &dataproc.WorkflowTemplateJobHiveJobArgs{
ContinueOnFailure: pulumi.Bool(false),
JarFileUris: pulumi.StringArray{
pulumi.String("string"),
},
Properties: pulumi.StringMap{
"string": pulumi.String("string"),
},
QueryFileUri: pulumi.String("string"),
QueryList: &dataproc.WorkflowTemplateJobHiveJobQueryListArgs{
Queries: pulumi.StringArray{
pulumi.String("string"),
},
},
ScriptVariables: pulumi.StringMap{
"string": pulumi.String("string"),
},
},
Labels: pulumi.StringMap{
"string": pulumi.String("string"),
},
PigJob: &dataproc.WorkflowTemplateJobPigJobArgs{
ContinueOnFailure: pulumi.Bool(false),
JarFileUris: pulumi.StringArray{
pulumi.String("string"),
},
LoggingConfig: &dataproc.WorkflowTemplateJobPigJobLoggingConfigArgs{
DriverLogLevels: pulumi.StringMap{
"string": pulumi.String("string"),
},
},
Properties: pulumi.StringMap{
"string": pulumi.String("string"),
},
QueryFileUri: pulumi.String("string"),
QueryList: &dataproc.WorkflowTemplateJobPigJobQueryListArgs{
Queries: pulumi.StringArray{
pulumi.String("string"),
},
},
ScriptVariables: pulumi.StringMap{
"string": pulumi.String("string"),
},
},
PrerequisiteStepIds: pulumi.StringArray{
pulumi.String("string"),
},
PrestoJob: &dataproc.WorkflowTemplateJobPrestoJobArgs{
ClientTags: pulumi.StringArray{
pulumi.String("string"),
},
ContinueOnFailure: pulumi.Bool(false),
LoggingConfig: &dataproc.WorkflowTemplateJobPrestoJobLoggingConfigArgs{
DriverLogLevels: pulumi.StringMap{
"string": pulumi.String("string"),
},
},
OutputFormat: pulumi.String("string"),
Properties: pulumi.StringMap{
"string": pulumi.String("string"),
},
QueryFileUri: pulumi.String("string"),
QueryList: &dataproc.WorkflowTemplateJobPrestoJobQueryListArgs{
Queries: pulumi.StringArray{
pulumi.String("string"),
},
},
},
PysparkJob: &dataproc.WorkflowTemplateJobPysparkJobArgs{
MainPythonFileUri: pulumi.String("string"),
ArchiveUris: pulumi.StringArray{
pulumi.String("string"),
},
Args: pulumi.StringArray{
pulumi.String("string"),
},
FileUris: pulumi.StringArray{
pulumi.String("string"),
},
JarFileUris: pulumi.StringArray{
pulumi.String("string"),
},
LoggingConfig: &dataproc.WorkflowTemplateJobPysparkJobLoggingConfigArgs{
DriverLogLevels: pulumi.StringMap{
"string": pulumi.String("string"),
},
},
Properties: pulumi.StringMap{
"string": pulumi.String("string"),
},
PythonFileUris: pulumi.StringArray{
pulumi.String("string"),
},
},
Scheduling: &dataproc.WorkflowTemplateJobSchedulingArgs{
MaxFailuresPerHour: pulumi.Int(0),
MaxFailuresTotal: pulumi.Int(0),
},
SparkJob: &dataproc.WorkflowTemplateJobSparkJobArgs{
ArchiveUris: pulumi.StringArray{
pulumi.String("string"),
},
Args: pulumi.StringArray{
pulumi.String("string"),
},
FileUris: pulumi.StringArray{
pulumi.String("string"),
},
JarFileUris: pulumi.StringArray{
pulumi.String("string"),
},
LoggingConfig: &dataproc.WorkflowTemplateJobSparkJobLoggingConfigArgs{
DriverLogLevels: pulumi.StringMap{
"string": pulumi.String("string"),
},
},
MainClass: pulumi.String("string"),
MainJarFileUri: pulumi.String("string"),
Properties: pulumi.StringMap{
"string": pulumi.String("string"),
},
},
SparkRJob: &dataproc.WorkflowTemplateJobSparkRJobArgs{
MainRFileUri: pulumi.String("string"),
ArchiveUris: pulumi.StringArray{
pulumi.String("string"),
},
Args: pulumi.StringArray{
pulumi.String("string"),
},
FileUris: pulumi.StringArray{
pulumi.String("string"),
},
LoggingConfig: &dataproc.WorkflowTemplateJobSparkRJobLoggingConfigArgs{
DriverLogLevels: pulumi.StringMap{
"string": pulumi.String("string"),
},
},
Properties: pulumi.StringMap{
"string": pulumi.String("string"),
},
},
SparkSqlJob: &dataproc.WorkflowTemplateJobSparkSqlJobArgs{
JarFileUris: pulumi.StringArray{
pulumi.String("string"),
},
LoggingConfig: &dataproc.WorkflowTemplateJobSparkSqlJobLoggingConfigArgs{
DriverLogLevels: pulumi.StringMap{
"string": pulumi.String("string"),
},
},
Properties: pulumi.StringMap{
"string": pulumi.String("string"),
},
QueryFileUri: pulumi.String("string"),
QueryList: &dataproc.WorkflowTemplateJobSparkSqlJobQueryListArgs{
Queries: pulumi.StringArray{
pulumi.String("string"),
},
},
ScriptVariables: pulumi.StringMap{
"string": pulumi.String("string"),
},
},
},
},
Location: pulumi.String("string"),
Placement: &dataproc.WorkflowTemplatePlacementArgs{
ClusterSelector: &dataproc.WorkflowTemplatePlacementClusterSelectorArgs{
ClusterLabels: pulumi.StringMap{
"string": pulumi.String("string"),
},
Zone: pulumi.String("string"),
},
ManagedCluster: &dataproc.WorkflowTemplatePlacementManagedClusterArgs{
ClusterName: pulumi.String("string"),
Config: &dataproc.WorkflowTemplatePlacementManagedClusterConfigArgs{
AutoscalingConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigArgs{
Policy: pulumi.String("string"),
},
EncryptionConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigArgs{
GcePdKmsKeyName: pulumi.String("string"),
},
EndpointConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigEndpointConfigArgs{
EnableHttpPortAccess: pulumi.Bool(false),
HttpPorts: pulumi.StringMap{
"string": pulumi.String("string"),
},
},
GceClusterConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigArgs{
InternalIpOnly: pulumi.Bool(false),
Metadata: pulumi.StringMap{
"string": pulumi.String("string"),
},
Network: pulumi.String("string"),
NodeGroupAffinity: &dataproc.WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityArgs{
NodeGroup: pulumi.String("string"),
},
PrivateIpv6GoogleAccess: pulumi.String("string"),
ReservationAffinity: &dataproc.WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityArgs{
ConsumeReservationType: pulumi.String("string"),
Key: pulumi.String("string"),
Values: pulumi.StringArray{
pulumi.String("string"),
},
},
ServiceAccount: pulumi.String("string"),
ServiceAccountScopes: pulumi.StringArray{
pulumi.String("string"),
},
ShieldedInstanceConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfigArgs{
EnableIntegrityMonitoring: pulumi.Bool(false),
EnableSecureBoot: pulumi.Bool(false),
EnableVtpm: pulumi.Bool(false),
},
Subnetwork: pulumi.String("string"),
Tags: pulumi.StringArray{
pulumi.String("string"),
},
Zone: pulumi.String("string"),
},
GkeClusterConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigArgs{
NamespacedGkeDeploymentTarget: &dataproc.WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetArgs{
ClusterNamespace: pulumi.String("string"),
TargetGkeCluster: pulumi.String("string"),
},
},
InitializationActions: dataproc.WorkflowTemplatePlacementManagedClusterConfigInitializationActionArray{
&dataproc.WorkflowTemplatePlacementManagedClusterConfigInitializationActionArgs{
ExecutableFile: pulumi.String("string"),
ExecutionTimeout: pulumi.String("string"),
},
},
LifecycleConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigArgs{
AutoDeleteTime: pulumi.String("string"),
AutoDeleteTtl: pulumi.String("string"),
IdleDeleteTtl: pulumi.String("string"),
IdleStartTime: pulumi.String("string"),
},
MasterConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigMasterConfigArgs{
Accelerators: dataproc.WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorArray{
&dataproc.WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorArgs{
AcceleratorCount: pulumi.Int(0),
AcceleratorType: pulumi.String("string"),
},
},
DiskConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigArgs{
BootDiskSizeGb: pulumi.Int(0),
BootDiskType: pulumi.String("string"),
NumLocalSsds: pulumi.Int(0),
},
Image: pulumi.String("string"),
InstanceNames: pulumi.StringArray{
pulumi.String("string"),
},
IsPreemptible: pulumi.Bool(false),
MachineType: pulumi.String("string"),
ManagedGroupConfigs: dataproc.WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigArray{
&dataproc.WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigArgs{
InstanceGroupManagerName: pulumi.String("string"),
InstanceTemplateName: pulumi.String("string"),
},
},
MinCpuPlatform: pulumi.String("string"),
NumInstances: pulumi.Int(0),
Preemptibility: pulumi.String("string"),
},
MetastoreConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigArgs{
DataprocMetastoreService: pulumi.String("string"),
},
SecondaryWorkerConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigArgs{
Accelerators: dataproc.WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorArray{
&dataproc.WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorArgs{
AcceleratorCount: pulumi.Int(0),
AcceleratorType: pulumi.String("string"),
},
},
DiskConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigArgs{
BootDiskSizeGb: pulumi.Int(0),
BootDiskType: pulumi.String("string"),
NumLocalSsds: pulumi.Int(0),
},
Image: pulumi.String("string"),
InstanceNames: pulumi.StringArray{
pulumi.String("string"),
},
IsPreemptible: pulumi.Bool(false),
MachineType: pulumi.String("string"),
ManagedGroupConfigs: dataproc.WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigArray{
&dataproc.WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigArgs{
InstanceGroupManagerName: pulumi.String("string"),
InstanceTemplateName: pulumi.String("string"),
},
},
MinCpuPlatform: pulumi.String("string"),
NumInstances: pulumi.Int(0),
Preemptibility: pulumi.String("string"),
},
SecurityConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigSecurityConfigArgs{
KerberosConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigArgs{
CrossRealmTrustAdminServer: pulumi.String("string"),
CrossRealmTrustKdc: pulumi.String("string"),
CrossRealmTrustRealm: pulumi.String("string"),
CrossRealmTrustSharedPassword: pulumi.String("string"),
EnableKerberos: pulumi.Bool(false),
KdcDbKey: pulumi.String("string"),
KeyPassword: pulumi.String("string"),
Keystore: pulumi.String("string"),
KeystorePassword: pulumi.String("string"),
KmsKey: pulumi.String("string"),
Realm: pulumi.String("string"),
RootPrincipalPassword: pulumi.String("string"),
TgtLifetimeHours: pulumi.Int(0),
Truststore: pulumi.String("string"),
TruststorePassword: pulumi.String("string"),
},
},
SoftwareConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigArgs{
ImageVersion: pulumi.String("string"),
OptionalComponents: pulumi.StringArray{
pulumi.String("string"),
},
Properties: pulumi.StringMap{
"string": pulumi.String("string"),
},
},
StagingBucket: pulumi.String("string"),
TempBucket: pulumi.String("string"),
WorkerConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigWorkerConfigArgs{
Accelerators: dataproc.WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorArray{
&dataproc.WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorArgs{
AcceleratorCount: pulumi.Int(0),
AcceleratorType: pulumi.String("string"),
},
},
DiskConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigArgs{
BootDiskSizeGb: pulumi.Int(0),
BootDiskType: pulumi.String("string"),
NumLocalSsds: pulumi.Int(0),
},
Image: pulumi.String("string"),
InstanceNames: pulumi.StringArray{
pulumi.String("string"),
},
IsPreemptible: pulumi.Bool(false),
MachineType: pulumi.String("string"),
ManagedGroupConfigs: dataproc.WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigArray{
&dataproc.WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigArgs{
InstanceGroupManagerName: pulumi.String("string"),
InstanceTemplateName: pulumi.String("string"),
},
},
MinCpuPlatform: pulumi.String("string"),
NumInstances: pulumi.Int(0),
Preemptibility: pulumi.String("string"),
},
},
Labels: pulumi.StringMap{
"string": pulumi.String("string"),
},
},
},
DagTimeout: pulumi.String("string"),
EncryptionConfig: &dataproc.WorkflowTemplateEncryptionConfigArgs{
KmsKey: pulumi.String("string"),
},
Labels: pulumi.StringMap{
"string": pulumi.String("string"),
},
Name: pulumi.String("string"),
Parameters: dataproc.WorkflowTemplateParameterArray{
&dataproc.WorkflowTemplateParameterArgs{
Fields: pulumi.StringArray{
pulumi.String("string"),
},
Name: pulumi.String("string"),
Description: pulumi.String("string"),
Validation: &dataproc.WorkflowTemplateParameterValidationArgs{
Regex: &dataproc.WorkflowTemplateParameterValidationRegexArgs{
Regexes: pulumi.StringArray{
pulumi.String("string"),
},
},
Values: &dataproc.WorkflowTemplateParameterValidationValuesArgs{
Values: pulumi.StringArray{
pulumi.String("string"),
},
},
},
},
},
Project: pulumi.String("string"),
})
var workflowTemplateResource = new WorkflowTemplate("workflowTemplateResource", WorkflowTemplateArgs.builder()
.jobs(WorkflowTemplateJobArgs.builder()
.stepId("string")
.hadoopJob(WorkflowTemplateJobHadoopJobArgs.builder()
.archiveUris("string")
.args("string")
.fileUris("string")
.jarFileUris("string")
.loggingConfig(WorkflowTemplateJobHadoopJobLoggingConfigArgs.builder()
.driverLogLevels(Map.of("string", "string"))
.build())
.mainClass("string")
.mainJarFileUri("string")
.properties(Map.of("string", "string"))
.build())
.hiveJob(WorkflowTemplateJobHiveJobArgs.builder()
.continueOnFailure(false)
.jarFileUris("string")
.properties(Map.of("string", "string"))
.queryFileUri("string")
.queryList(WorkflowTemplateJobHiveJobQueryListArgs.builder()
.queries("string")
.build())
.scriptVariables(Map.of("string", "string"))
.build())
.labels(Map.of("string", "string"))
.pigJob(WorkflowTemplateJobPigJobArgs.builder()
.continueOnFailure(false)
.jarFileUris("string")
.loggingConfig(WorkflowTemplateJobPigJobLoggingConfigArgs.builder()
.driverLogLevels(Map.of("string", "string"))
.build())
.properties(Map.of("string", "string"))
.queryFileUri("string")
.queryList(WorkflowTemplateJobPigJobQueryListArgs.builder()
.queries("string")
.build())
.scriptVariables(Map.of("string", "string"))
.build())
.prerequisiteStepIds("string")
.prestoJob(WorkflowTemplateJobPrestoJobArgs.builder()
.clientTags("string")
.continueOnFailure(false)
.loggingConfig(WorkflowTemplateJobPrestoJobLoggingConfigArgs.builder()
.driverLogLevels(Map.of("string", "string"))
.build())
.outputFormat("string")
.properties(Map.of("string", "string"))
.queryFileUri("string")
.queryList(WorkflowTemplateJobPrestoJobQueryListArgs.builder()
.queries("string")
.build())
.build())
.pysparkJob(WorkflowTemplateJobPysparkJobArgs.builder()
.mainPythonFileUri("string")
.archiveUris("string")
.args("string")
.fileUris("string")
.jarFileUris("string")
.loggingConfig(WorkflowTemplateJobPysparkJobLoggingConfigArgs.builder()
.driverLogLevels(Map.of("string", "string"))
.build())
.properties(Map.of("string", "string"))
.pythonFileUris("string")
.build())
.scheduling(WorkflowTemplateJobSchedulingArgs.builder()
.maxFailuresPerHour(0)
.maxFailuresTotal(0)
.build())
.sparkJob(WorkflowTemplateJobSparkJobArgs.builder()
.archiveUris("string")
.args("string")
.fileUris("string")
.jarFileUris("string")
.loggingConfig(WorkflowTemplateJobSparkJobLoggingConfigArgs.builder()
.driverLogLevels(Map.of("string", "string"))
.build())
.mainClass("string")
.mainJarFileUri("string")
.properties(Map.of("string", "string"))
.build())
.sparkRJob(WorkflowTemplateJobSparkRJobArgs.builder()
.mainRFileUri("string")
.archiveUris("string")
.args("string")
.fileUris("string")
.loggingConfig(WorkflowTemplateJobSparkRJobLoggingConfigArgs.builder()
.driverLogLevels(Map.of("string", "string"))
.build())
.properties(Map.of("string", "string"))
.build())
.sparkSqlJob(WorkflowTemplateJobSparkSqlJobArgs.builder()
.jarFileUris("string")
.loggingConfig(WorkflowTemplateJobSparkSqlJobLoggingConfigArgs.builder()
.driverLogLevels(Map.of("string", "string"))
.build())
.properties(Map.of("string", "string"))
.queryFileUri("string")
.queryList(WorkflowTemplateJobSparkSqlJobQueryListArgs.builder()
.queries("string")
.build())
.scriptVariables(Map.of("string", "string"))
.build())
.build())
.location("string")
.placement(WorkflowTemplatePlacementArgs.builder()
.clusterSelector(WorkflowTemplatePlacementClusterSelectorArgs.builder()
.clusterLabels(Map.of("string", "string"))
.zone("string")
.build())
.managedCluster(WorkflowTemplatePlacementManagedClusterArgs.builder()
.clusterName("string")
.config(WorkflowTemplatePlacementManagedClusterConfigArgs.builder()
.autoscalingConfig(WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigArgs.builder()
.policy("string")
.build())
.encryptionConfig(WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigArgs.builder()
.gcePdKmsKeyName("string")
.build())
.endpointConfig(WorkflowTemplatePlacementManagedClusterConfigEndpointConfigArgs.builder()
.enableHttpPortAccess(false)
.httpPorts(Map.of("string", "string"))
.build())
.gceClusterConfig(WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigArgs.builder()
.internalIpOnly(false)
.metadata(Map.of("string", "string"))
.network("string")
.nodeGroupAffinity(WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityArgs.builder()
.nodeGroup("string")
.build())
.privateIpv6GoogleAccess("string")
.reservationAffinity(WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityArgs.builder()
.consumeReservationType("string")
.key("string")
.values("string")
.build())
.serviceAccount("string")
.serviceAccountScopes("string")
.shieldedInstanceConfig(WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfigArgs.builder()
.enableIntegrityMonitoring(false)
.enableSecureBoot(false)
.enableVtpm(false)
.build())
.subnetwork("string")
.tags("string")
.zone("string")
.build())
.gkeClusterConfig(WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigArgs.builder()
.namespacedGkeDeploymentTarget(WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetArgs.builder()
.clusterNamespace("string")
.targetGkeCluster("string")
.build())
.build())
.initializationActions(WorkflowTemplatePlacementManagedClusterConfigInitializationActionArgs.builder()
.executableFile("string")
.executionTimeout("string")
.build())
.lifecycleConfig(WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigArgs.builder()
.autoDeleteTime("string")
.autoDeleteTtl("string")
.idleDeleteTtl("string")
.idleStartTime("string")
.build())
.masterConfig(WorkflowTemplatePlacementManagedClusterConfigMasterConfigArgs.builder()
.accelerators(WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorArgs.builder()
.acceleratorCount(0)
.acceleratorType("string")
.build())
.diskConfig(WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigArgs.builder()
.bootDiskSizeGb(0)
.bootDiskType("string")
.numLocalSsds(0)
.build())
.image("string")
.instanceNames("string")
.isPreemptible(false)
.machineType("string")
.managedGroupConfigs(WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigArgs.builder()
.instanceGroupManagerName("string")
.instanceTemplateName("string")
.build())
.minCpuPlatform("string")
.numInstances(0)
.preemptibility("string")
.build())
.metastoreConfig(WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigArgs.builder()
.dataprocMetastoreService("string")
.build())
.secondaryWorkerConfig(WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigArgs.builder()
.accelerators(WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorArgs.builder()
.acceleratorCount(0)
.acceleratorType("string")
.build())
.diskConfig(WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigArgs.builder()
.bootDiskSizeGb(0)
.bootDiskType("string")
.numLocalSsds(0)
.build())
.image("string")
.instanceNames("string")
.isPreemptible(false)
.machineType("string")
.managedGroupConfigs(WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigArgs.builder()
.instanceGroupManagerName("string")
.instanceTemplateName("string")
.build())
.minCpuPlatform("string")
.numInstances(0)
.preemptibility("string")
.build())
.securityConfig(WorkflowTemplatePlacementManagedClusterConfigSecurityConfigArgs.builder()
.kerberosConfig(WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigArgs.builder()
.crossRealmTrustAdminServer("string")
.crossRealmTrustKdc("string")
.crossRealmTrustRealm("string")
.crossRealmTrustSharedPassword("string")
.enableKerberos(false)
.kdcDbKey("string")
.keyPassword("string")
.keystore("string")
.keystorePassword("string")
.kmsKey("string")
.realm("string")
.rootPrincipalPassword("string")
.tgtLifetimeHours(0)
.truststore("string")
.truststorePassword("string")
.build())
.build())
.softwareConfig(WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigArgs.builder()
.imageVersion("string")
.optionalComponents("string")
.properties(Map.of("string", "string"))
.build())
.stagingBucket("string")
.tempBucket("string")
.workerConfig(WorkflowTemplatePlacementManagedClusterConfigWorkerConfigArgs.builder()
.accelerators(WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorArgs.builder()
.acceleratorCount(0)
.acceleratorType("string")
.build())
.diskConfig(WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigArgs.builder()
.bootDiskSizeGb(0)
.bootDiskType("string")
.numLocalSsds(0)
.build())
.image("string")
.instanceNames("string")
.isPreemptible(false)
.machineType("string")
.managedGroupConfigs(WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigArgs.builder()
.instanceGroupManagerName("string")
.instanceTemplateName("string")
.build())
.minCpuPlatform("string")
.numInstances(0)
.preemptibility("string")
.build())
.build())
.labels(Map.of("string", "string"))
.build())
.build())
.dagTimeout("string")
.encryptionConfig(WorkflowTemplateEncryptionConfigArgs.builder()
.kmsKey("string")
.build())
.labels(Map.of("string", "string"))
.name("string")
.parameters(WorkflowTemplateParameterArgs.builder()
.fields("string")
.name("string")
.description("string")
.validation(WorkflowTemplateParameterValidationArgs.builder()
.regex(WorkflowTemplateParameterValidationRegexArgs.builder()
.regexes("string")
.build())
.values(WorkflowTemplateParameterValidationValuesArgs.builder()
.values("string")
.build())
.build())
.build())
.project("string")
.build());
workflow_template_resource = gcp.dataproc.WorkflowTemplate("workflowTemplateResource",
jobs=[{
"step_id": "string",
"hadoop_job": {
"archive_uris": ["string"],
"args": ["string"],
"file_uris": ["string"],
"jar_file_uris": ["string"],
"logging_config": {
"driver_log_levels": {
"string": "string",
},
},
"main_class": "string",
"main_jar_file_uri": "string",
"properties": {
"string": "string",
},
},
"hive_job": {
"continue_on_failure": False,
"jar_file_uris": ["string"],
"properties": {
"string": "string",
},
"query_file_uri": "string",
"query_list": {
"queries": ["string"],
},
"script_variables": {
"string": "string",
},
},
"labels": {
"string": "string",
},
"pig_job": {
"continue_on_failure": False,
"jar_file_uris": ["string"],
"logging_config": {
"driver_log_levels": {
"string": "string",
},
},
"properties": {
"string": "string",
},
"query_file_uri": "string",
"query_list": {
"queries": ["string"],
},
"script_variables": {
"string": "string",
},
},
"prerequisite_step_ids": ["string"],
"presto_job": {
"client_tags": ["string"],
"continue_on_failure": False,
"logging_config": {
"driver_log_levels": {
"string": "string",
},
},
"output_format": "string",
"properties": {
"string": "string",
},
"query_file_uri": "string",
"query_list": {
"queries": ["string"],
},
},
"pyspark_job": {
"main_python_file_uri": "string",
"archive_uris": ["string"],
"args": ["string"],
"file_uris": ["string"],
"jar_file_uris": ["string"],
"logging_config": {
"driver_log_levels": {
"string": "string",
},
},
"properties": {
"string": "string",
},
"python_file_uris": ["string"],
},
"scheduling": {
"max_failures_per_hour": 0,
"max_failures_total": 0,
},
"spark_job": {
"archive_uris": ["string"],
"args": ["string"],
"file_uris": ["string"],
"jar_file_uris": ["string"],
"logging_config": {
"driver_log_levels": {
"string": "string",
},
},
"main_class": "string",
"main_jar_file_uri": "string",
"properties": {
"string": "string",
},
},
"spark_r_job": {
"main_r_file_uri": "string",
"archive_uris": ["string"],
"args": ["string"],
"file_uris": ["string"],
"logging_config": {
"driver_log_levels": {
"string": "string",
},
},
"properties": {
"string": "string",
},
},
"spark_sql_job": {
"jar_file_uris": ["string"],
"logging_config": {
"driver_log_levels": {
"string": "string",
},
},
"properties": {
"string": "string",
},
"query_file_uri": "string",
"query_list": {
"queries": ["string"],
},
"script_variables": {
"string": "string",
},
},
}],
location="string",
placement={
"cluster_selector": {
"cluster_labels": {
"string": "string",
},
"zone": "string",
},
"managed_cluster": {
"cluster_name": "string",
"config": {
"autoscaling_config": {
"policy": "string",
},
"encryption_config": {
"gce_pd_kms_key_name": "string",
},
"endpoint_config": {
"enable_http_port_access": False,
"http_ports": {
"string": "string",
},
},
"gce_cluster_config": {
"internal_ip_only": False,
"metadata": {
"string": "string",
},
"network": "string",
"node_group_affinity": {
"node_group": "string",
},
"private_ipv6_google_access": "string",
"reservation_affinity": {
"consume_reservation_type": "string",
"key": "string",
"values": ["string"],
},
"service_account": "string",
"service_account_scopes": ["string"],
"shielded_instance_config": {
"enable_integrity_monitoring": False,
"enable_secure_boot": False,
"enable_vtpm": False,
},
"subnetwork": "string",
"tags": ["string"],
"zone": "string",
},
"gke_cluster_config": {
"namespaced_gke_deployment_target": {
"cluster_namespace": "string",
"target_gke_cluster": "string",
},
},
"initialization_actions": [{
"executable_file": "string",
"execution_timeout": "string",
}],
"lifecycle_config": {
"auto_delete_time": "string",
"auto_delete_ttl": "string",
"idle_delete_ttl": "string",
"idle_start_time": "string",
},
"master_config": {
"accelerators": [{
"accelerator_count": 0,
"accelerator_type": "string",
}],
"disk_config": {
"boot_disk_size_gb": 0,
"boot_disk_type": "string",
"num_local_ssds": 0,
},
"image": "string",
"instance_names": ["string"],
"is_preemptible": False,
"machine_type": "string",
"managed_group_configs": [{
"instance_group_manager_name": "string",
"instance_template_name": "string",
}],
"min_cpu_platform": "string",
"num_instances": 0,
"preemptibility": "string",
},
"metastore_config": {
"dataproc_metastore_service": "string",
},
"secondary_worker_config": {
"accelerators": [{
"accelerator_count": 0,
"accelerator_type": "string",
}],
"disk_config": {
"boot_disk_size_gb": 0,
"boot_disk_type": "string",
"num_local_ssds": 0,
},
"image": "string",
"instance_names": ["string"],
"is_preemptible": False,
"machine_type": "string",
"managed_group_configs": [{
"instance_group_manager_name": "string",
"instance_template_name": "string",
}],
"min_cpu_platform": "string",
"num_instances": 0,
"preemptibility": "string",
},
"security_config": {
"kerberos_config": {
"cross_realm_trust_admin_server": "string",
"cross_realm_trust_kdc": "string",
"cross_realm_trust_realm": "string",
"cross_realm_trust_shared_password": "string",
"enable_kerberos": False,
"kdc_db_key": "string",
"key_password": "string",
"keystore": "string",
"keystore_password": "string",
"kms_key": "string",
"realm": "string",
"root_principal_password": "string",
"tgt_lifetime_hours": 0,
"truststore": "string",
"truststore_password": "string",
},
},
"software_config": {
"image_version": "string",
"optional_components": ["string"],
"properties": {
"string": "string",
},
},
"staging_bucket": "string",
"temp_bucket": "string",
"worker_config": {
"accelerators": [{
"accelerator_count": 0,
"accelerator_type": "string",
}],
"disk_config": {
"boot_disk_size_gb": 0,
"boot_disk_type": "string",
"num_local_ssds": 0,
},
"image": "string",
"instance_names": ["string"],
"is_preemptible": False,
"machine_type": "string",
"managed_group_configs": [{
"instance_group_manager_name": "string",
"instance_template_name": "string",
}],
"min_cpu_platform": "string",
"num_instances": 0,
"preemptibility": "string",
},
},
"labels": {
"string": "string",
},
},
},
dag_timeout="string",
encryption_config={
"kms_key": "string",
},
labels={
"string": "string",
},
name="string",
parameters=[{
"fields": ["string"],
"name": "string",
"description": "string",
"validation": {
"regex": {
"regexes": ["string"],
},
"values": {
"values": ["string"],
},
},
}],
project="string")
const workflowTemplateResource = new gcp.dataproc.WorkflowTemplate("workflowTemplateResource", {
jobs: [{
stepId: "string",
hadoopJob: {
archiveUris: ["string"],
args: ["string"],
fileUris: ["string"],
jarFileUris: ["string"],
loggingConfig: {
driverLogLevels: {
string: "string",
},
},
mainClass: "string",
mainJarFileUri: "string",
properties: {
string: "string",
},
},
hiveJob: {
continueOnFailure: false,
jarFileUris: ["string"],
properties: {
string: "string",
},
queryFileUri: "string",
queryList: {
queries: ["string"],
},
scriptVariables: {
string: "string",
},
},
labels: {
string: "string",
},
pigJob: {
continueOnFailure: false,
jarFileUris: ["string"],
loggingConfig: {
driverLogLevels: {
string: "string",
},
},
properties: {
string: "string",
},
queryFileUri: "string",
queryList: {
queries: ["string"],
},
scriptVariables: {
string: "string",
},
},
prerequisiteStepIds: ["string"],
prestoJob: {
clientTags: ["string"],
continueOnFailure: false,
loggingConfig: {
driverLogLevels: {
string: "string",
},
},
outputFormat: "string",
properties: {
string: "string",
},
queryFileUri: "string",
queryList: {
queries: ["string"],
},
},
pysparkJob: {
mainPythonFileUri: "string",
archiveUris: ["string"],
args: ["string"],
fileUris: ["string"],
jarFileUris: ["string"],
loggingConfig: {
driverLogLevels: {
string: "string",
},
},
properties: {
string: "string",
},
pythonFileUris: ["string"],
},
scheduling: {
maxFailuresPerHour: 0,
maxFailuresTotal: 0,
},
sparkJob: {
archiveUris: ["string"],
args: ["string"],
fileUris: ["string"],
jarFileUris: ["string"],
loggingConfig: {
driverLogLevels: {
string: "string",
},
},
mainClass: "string",
mainJarFileUri: "string",
properties: {
string: "string",
},
},
sparkRJob: {
mainRFileUri: "string",
archiveUris: ["string"],
args: ["string"],
fileUris: ["string"],
loggingConfig: {
driverLogLevels: {
string: "string",
},
},
properties: {
string: "string",
},
},
sparkSqlJob: {
jarFileUris: ["string"],
loggingConfig: {
driverLogLevels: {
string: "string",
},
},
properties: {
string: "string",
},
queryFileUri: "string",
queryList: {
queries: ["string"],
},
scriptVariables: {
string: "string",
},
},
}],
location: "string",
placement: {
clusterSelector: {
clusterLabels: {
string: "string",
},
zone: "string",
},
managedCluster: {
clusterName: "string",
config: {
autoscalingConfig: {
policy: "string",
},
encryptionConfig: {
gcePdKmsKeyName: "string",
},
endpointConfig: {
enableHttpPortAccess: false,
httpPorts: {
string: "string",
},
},
gceClusterConfig: {
internalIpOnly: false,
metadata: {
string: "string",
},
network: "string",
nodeGroupAffinity: {
nodeGroup: "string",
},
privateIpv6GoogleAccess: "string",
reservationAffinity: {
consumeReservationType: "string",
key: "string",
values: ["string"],
},
serviceAccount: "string",
serviceAccountScopes: ["string"],
shieldedInstanceConfig: {
enableIntegrityMonitoring: false,
enableSecureBoot: false,
enableVtpm: false,
},
subnetwork: "string",
tags: ["string"],
zone: "string",
},
gkeClusterConfig: {
namespacedGkeDeploymentTarget: {
clusterNamespace: "string",
targetGkeCluster: "string",
},
},
initializationActions: [{
executableFile: "string",
executionTimeout: "string",
}],
lifecycleConfig: {
autoDeleteTime: "string",
autoDeleteTtl: "string",
idleDeleteTtl: "string",
idleStartTime: "string",
},
masterConfig: {
accelerators: [{
acceleratorCount: 0,
acceleratorType: "string",
}],
diskConfig: {
bootDiskSizeGb: 0,
bootDiskType: "string",
numLocalSsds: 0,
},
image: "string",
instanceNames: ["string"],
isPreemptible: false,
machineType: "string",
managedGroupConfigs: [{
instanceGroupManagerName: "string",
instanceTemplateName: "string",
}],
minCpuPlatform: "string",
numInstances: 0,
preemptibility: "string",
},
metastoreConfig: {
dataprocMetastoreService: "string",
},
secondaryWorkerConfig: {
accelerators: [{
acceleratorCount: 0,
acceleratorType: "string",
}],
diskConfig: {
bootDiskSizeGb: 0,
bootDiskType: "string",
numLocalSsds: 0,
},
image: "string",
instanceNames: ["string"],
isPreemptible: false,
machineType: "string",
managedGroupConfigs: [{
instanceGroupManagerName: "string",
instanceTemplateName: "string",
}],
minCpuPlatform: "string",
numInstances: 0,
preemptibility: "string",
},
securityConfig: {
kerberosConfig: {
crossRealmTrustAdminServer: "string",
crossRealmTrustKdc: "string",
crossRealmTrustRealm: "string",
crossRealmTrustSharedPassword: "string",
enableKerberos: false,
kdcDbKey: "string",
keyPassword: "string",
keystore: "string",
keystorePassword: "string",
kmsKey: "string",
realm: "string",
rootPrincipalPassword: "string",
tgtLifetimeHours: 0,
truststore: "string",
truststorePassword: "string",
},
},
softwareConfig: {
imageVersion: "string",
optionalComponents: ["string"],
properties: {
string: "string",
},
},
stagingBucket: "string",
tempBucket: "string",
workerConfig: {
accelerators: [{
acceleratorCount: 0,
acceleratorType: "string",
}],
diskConfig: {
bootDiskSizeGb: 0,
bootDiskType: "string",
numLocalSsds: 0,
},
image: "string",
instanceNames: ["string"],
isPreemptible: false,
machineType: "string",
managedGroupConfigs: [{
instanceGroupManagerName: "string",
instanceTemplateName: "string",
}],
minCpuPlatform: "string",
numInstances: 0,
preemptibility: "string",
},
},
labels: {
string: "string",
},
},
},
dagTimeout: "string",
encryptionConfig: {
kmsKey: "string",
},
labels: {
string: "string",
},
name: "string",
parameters: [{
fields: ["string"],
name: "string",
description: "string",
validation: {
regex: {
regexes: ["string"],
},
values: {
values: ["string"],
},
},
}],
project: "string",
});
type: gcp:dataproc:WorkflowTemplate
properties:
dagTimeout: string
encryptionConfig:
kmsKey: string
jobs:
- hadoopJob:
archiveUris:
- string
args:
- string
fileUris:
- string
jarFileUris:
- string
loggingConfig:
driverLogLevels:
string: string
mainClass: string
mainJarFileUri: string
properties:
string: string
hiveJob:
continueOnFailure: false
jarFileUris:
- string
properties:
string: string
queryFileUri: string
queryList:
queries:
- string
scriptVariables:
string: string
labels:
string: string
pigJob:
continueOnFailure: false
jarFileUris:
- string
loggingConfig:
driverLogLevels:
string: string
properties:
string: string
queryFileUri: string
queryList:
queries:
- string
scriptVariables:
string: string
prerequisiteStepIds:
- string
prestoJob:
clientTags:
- string
continueOnFailure: false
loggingConfig:
driverLogLevels:
string: string
outputFormat: string
properties:
string: string
queryFileUri: string
queryList:
queries:
- string
pysparkJob:
archiveUris:
- string
args:
- string
fileUris:
- string
jarFileUris:
- string
loggingConfig:
driverLogLevels:
string: string
mainPythonFileUri: string
properties:
string: string
pythonFileUris:
- string
scheduling:
maxFailuresPerHour: 0
maxFailuresTotal: 0
sparkJob:
archiveUris:
- string
args:
- string
fileUris:
- string
jarFileUris:
- string
loggingConfig:
driverLogLevels:
string: string
mainClass: string
mainJarFileUri: string
properties:
string: string
sparkRJob:
archiveUris:
- string
args:
- string
fileUris:
- string
loggingConfig:
driverLogLevels:
string: string
mainRFileUri: string
properties:
string: string
sparkSqlJob:
jarFileUris:
- string
loggingConfig:
driverLogLevels:
string: string
properties:
string: string
queryFileUri: string
queryList:
queries:
- string
scriptVariables:
string: string
stepId: string
labels:
string: string
location: string
name: string
parameters:
- description: string
fields:
- string
name: string
validation:
regex:
regexes:
- string
values:
values:
- string
placement:
clusterSelector:
clusterLabels:
string: string
zone: string
managedCluster:
clusterName: string
config:
autoscalingConfig:
policy: string
encryptionConfig:
gcePdKmsKeyName: string
endpointConfig:
enableHttpPortAccess: false
httpPorts:
string: string
gceClusterConfig:
internalIpOnly: false
metadata:
string: string
network: string
nodeGroupAffinity:
nodeGroup: string
privateIpv6GoogleAccess: string
reservationAffinity:
consumeReservationType: string
key: string
values:
- string
serviceAccount: string
serviceAccountScopes:
- string
shieldedInstanceConfig:
enableIntegrityMonitoring: false
enableSecureBoot: false
enableVtpm: false
subnetwork: string
tags:
- string
zone: string
gkeClusterConfig:
namespacedGkeDeploymentTarget:
clusterNamespace: string
targetGkeCluster: string
initializationActions:
- executableFile: string
executionTimeout: string
lifecycleConfig:
autoDeleteTime: string
autoDeleteTtl: string
idleDeleteTtl: string
idleStartTime: string
masterConfig:
accelerators:
- acceleratorCount: 0
acceleratorType: string
diskConfig:
bootDiskSizeGb: 0
bootDiskType: string
numLocalSsds: 0
image: string
instanceNames:
- string
isPreemptible: false
machineType: string
managedGroupConfigs:
- instanceGroupManagerName: string
instanceTemplateName: string
minCpuPlatform: string
numInstances: 0
preemptibility: string
metastoreConfig:
dataprocMetastoreService: string
secondaryWorkerConfig:
accelerators:
- acceleratorCount: 0
acceleratorType: string
diskConfig:
bootDiskSizeGb: 0
bootDiskType: string
numLocalSsds: 0
image: string
instanceNames:
- string
isPreemptible: false
machineType: string
managedGroupConfigs:
- instanceGroupManagerName: string
instanceTemplateName: string
minCpuPlatform: string
numInstances: 0
preemptibility: string
securityConfig:
kerberosConfig:
crossRealmTrustAdminServer: string
crossRealmTrustKdc: string
crossRealmTrustRealm: string
crossRealmTrustSharedPassword: string
enableKerberos: false
kdcDbKey: string
keyPassword: string
keystore: string
keystorePassword: string
kmsKey: string
realm: string
rootPrincipalPassword: string
tgtLifetimeHours: 0
truststore: string
truststorePassword: string
softwareConfig:
imageVersion: string
optionalComponents:
- string
properties:
string: string
stagingBucket: string
tempBucket: string
workerConfig:
accelerators:
- acceleratorCount: 0
acceleratorType: string
diskConfig:
bootDiskSizeGb: 0
bootDiskType: string
numLocalSsds: 0
image: string
instanceNames:
- string
isPreemptible: false
machineType: string
managedGroupConfigs:
- instanceGroupManagerName: string
instanceTemplateName: string
minCpuPlatform: string
numInstances: 0
preemptibility: string
labels:
string: string
project: string
WorkflowTemplate Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
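For instance, here is a minimal sketch of the two equivalent Python forms for the encryptionConfig input; the KMS key name is a placeholder, not a value from this page.

import pulumi_gcp as gcp

# 1) Dictionary literal form of the nested input.
encryption_as_dict = {
    "kms_key": "projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/template-key",
}

# 2) Equivalent argument-class form.
encryption_as_args = gcp.dataproc.WorkflowTemplateEncryptionConfigArgs(
    kms_key="projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/template-key",
)

# Either value can be passed as encryption_config= when constructing gcp.dataproc.WorkflowTemplate.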
The WorkflowTemplate resource accepts the following input properties:
- Jobs
This property is required. Changes to this property will trigger replacement.
Template Job> - Required. The Directed Acyclic Graph of Jobs to submit.
- Location
This property is required. Changes to this property will trigger replacement.
- The location for the resource
- Placement
This property is required. Changes to this property will trigger replacement.
Template Placement - Required. WorkflowTemplate scheduling information.
- Dag
Timeout Changes to this property will trigger replacement.
- Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted.
- Encryption
Config Changes to this property will trigger replacement.
Template Encryption Config - Optional. The encryption configuration for the workflow template.
- Labels Dictionary<string, string>
- Optional. The labels to associate with this template. These labels will be propagated to all jobs and clusters created
by the workflow instance. Label keys must contain 1 to 63 characters, and must conform to RFC
1035. Label values may be empty, but, if present, must contain 1 to 63
characters, and must conform to RFC 1035. No more than 32 labels can be
associated with a template. Note: This field is non-authoritative, and will only manage the labels present in your
configuration. Please refer to the field
effective_labels
for all of the labels present on the resource. - Name
Changes to this property will trigger replacement.
- Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names.
* For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id}
* For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}
- Parameters
Changes to this property will trigger replacement.
Template Parameter> - Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated.
- Project
Changes to this property will trigger replacement.
- The project for the resource
- Version
Changes to this property will trigger replacement.
- Output only. The current version of this workflow template.
- Jobs
This property is required. Changes to this property will trigger replacement.
Template Job Args - Required. The Directed Acyclic Graph of Jobs to submit.
- Location
This property is required. Changes to this property will trigger replacement.
- The location for the resource
- Placement
This property is required. Changes to this property will trigger replacement.
Template Placement Args - Required. WorkflowTemplate scheduling information.
- Dag
Timeout Changes to this property will trigger replacement.
- Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted.
- Encryption
Config Changes to this property will trigger replacement.
Template Encryption Config Args - Optional. The encryption configuration for the workflow template.
- Labels map[string]string
- Optional. The labels to associate with this template. These labels will be propagated to all jobs and clusters created
by the workflow instance. Label keys must contain 1 to 63 characters, and must conform to RFC
1035. Label values may be empty, but, if present, must contain 1 to 63
characters, and must conform to RFC 1035. No more than 32 labels can be
associated with a template. Note: This field is non-authoritative, and will only manage the labels present in your
configuration. Please refer to the field
effective_labels
for all of the labels present on the resource. - Name
Changes to this property will trigger replacement.
- Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names.
* For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id}
* For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}
- Parameters
Changes to this property will trigger replacement.
Template Parameter Args - Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated.
- Project
Changes to this property will trigger replacement.
- The project for the resource
- Version
Changes to this property will trigger replacement.
- Output only. The current version of this workflow template.
- jobs
This property is required. Changes to this property will trigger replacement.
Template Job> - Required. The Directed Acyclic Graph of Jobs to submit.
- location
This property is required. Changes to this property will trigger replacement.
- The location for the resource
- placement
This property is required. Changes to this property will trigger replacement.
Template Placement - Required. WorkflowTemplate scheduling information.
- dag
Timeout Changes to this property will trigger replacement.
- Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted.
- encryption
Config Changes to this property will trigger replacement.
Template Encryption Config - Optional. The encryption configuration for the workflow template.
- labels Map<String,String>
- Optional. The labels to associate with this template. These labels will be propagated to all jobs and clusters created
by the workflow instance. Label keys must contain 1 to 63 characters, and must conform to RFC
1035. Label values may be empty, but, if present, must contain 1 to 63
characters, and must conform to RFC 1035. No more than 32 labels can be
associated with a template. Note: This field is non-authoritative, and will only manage the labels present in your
configuration. Please refer to the field
effective_labels
for all of the labels present on the resource. - name
Changes to this property will trigger replacement.
- Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names.
* For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id}
* For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}
- parameters
Changes to this property will trigger replacement.
Template Parameter> - Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated.
- project
Changes to this property will trigger replacement.
- The project for the resource
- version
Changes to this property will trigger replacement.
- Output only. The current version of this workflow template.
- jobs
This property is required. Changes to this property will trigger replacement.
Template Job[] - Required. The Directed Acyclic Graph of Jobs to submit.
- location
This property is required. Changes to this property will trigger replacement.
- The location for the resource
- placement
This property is required. Changes to this property will trigger replacement.
Template Placement - Required. WorkflowTemplate scheduling information.
- dag
Timeout Changes to this property will trigger replacement.
- Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted.
- encryption
Config Changes to this property will trigger replacement.
Template Encryption Config - Optional. The encryption configuration for the workflow template.
- labels {[key: string]: string}
- Optional. The labels to associate with this template. These labels will be propagated to all jobs and clusters created
by the workflow instance. Label keys must contain 1 to 63 characters, and must conform to RFC
1035. Label values may be empty, but, if present, must contain 1 to 63
characters, and must conform to RFC 1035. No more than 32 labels can be
associated with a template. Note: This field is non-authoritative, and will only manage the labels present in your
configuration. Please refer to the field
effective_labels
for all of the labels present on the resource. - name
Changes to this property will trigger replacement.
- Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names.
* For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id}
* For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}
- parameters
Changes to this property will trigger replacement.
Template Parameter[] - Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated.
- project
Changes to this property will trigger replacement.
- The project for the resource
- version
Changes to this property will trigger replacement.
- Output only. The current version of this workflow template.
- jobs
This property is required. Changes to this property will trigger replacement.
Template Job Args] - Required. The Directed Acyclic Graph of Jobs to submit.
- location
This property is required. Changes to this property will trigger replacement.
- The location for the resource
- placement
This property is required. Changes to this property will trigger replacement.
Template Placement Args - Required. WorkflowTemplate scheduling information.
- dag_
timeout Changes to this property will trigger replacement.
- Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted.
- encryption_
config Changes to this property will trigger replacement.
Template Encryption Config Args - Optional. The encryption configuration for the workflow template.
- labels Mapping[str, str]
- Optional. The labels to associate with this template. These labels will be propagated to all jobs and clusters created
by the workflow instance. Label keys must contain 1 to 63 characters, and must conform to RFC
1035. Label values may be empty, but, if present, must contain 1 to 63
characters, and must conform to RFC 1035. No more than 32 labels can be
associated with a template. Note: This field is non-authoritative, and will only manage the labels present in your
configuration. Please refer to the field
effective_labels
for all of the labels present on the resource. - name
Changes to this property will trigger replacement.
- Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names.
* For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id}
* For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}
- parameters
Changes to this property will trigger replacement.
Template Parameter Args] - Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated.
- project
Changes to this property will trigger replacement.
- The project for the resource
- version
Changes to this property will trigger replacement.
- Output only. The current version of this workflow template.
- jobs
This property is required. Changes to this property will trigger replacement.
- Required. The Directed Acyclic Graph of Jobs to submit.
- location
This property is required. Changes to this property will trigger replacement.
- The location for the resource
- placement
This property is required. Changes to this property will trigger replacement.
- Required. WorkflowTemplate scheduling information.
- dag
Timeout Changes to this property will trigger replacement.
- Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted.
- encryption
Config Changes to this property will trigger replacement.
- Optional. The encryption configuration for the workflow template.
- labels Map<String>
- Optional. The labels to associate with this template. These labels will be propagated to all jobs and clusters created
by the workflow instance. Label keys must contain 1 to 63 characters, and must conform to RFC
1035. Label values may be empty, but, if present, must contain 1 to 63
characters, and must conform to RFC 1035. No more than 32 labels can be
associated with a template. Note: This field is non-authoritative, and will only manage the labels present in your
configuration. Please refer to the field
effective_labels
for all of the labels present on the resource. - name
Changes to this property will trigger replacement.
- Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names.
* For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id}
* For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}
- parameters
Changes to this property will trigger replacement.
- Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated.
- project
Changes to this property will trigger replacement.
- The project for the resource
- version
Changes to this property will trigger replacement.
- Output only. The current version of this workflow template.
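To illustrate how the dagTimeout and parameters inputs described above combine, the following is a hedged Python sketch of a parameterized template that targets an existing cluster by label; the label value, query URI, parameter field path, and regex are illustrative assumptions rather than values from this page.

import pulumi_gcp as gcp

parameterized = gcp.dataproc.WorkflowTemplate("parameterized",
    name="parameterized-example",
    location="us-central1",
    # If the workflow is still running after 30 minutes, remaining jobs are cancelled
    # and the workflow ends (dagTimeout must be between "600s" and "86400s").
    dag_timeout="1800s",
    placement={
        # Run on an existing cluster selected by label instead of creating a managed cluster.
        "cluster_selector": {
            "cluster_labels": {
                "env": "staging",  # assumed label on an existing cluster
            },
        },
    },
    jobs=[{
        "step_id": "query",
        "presto_job": {
            "query_file_uri": "gs://my-bucket/queries/report.sql",  # placeholder URI
        },
    }],
    parameters=[{
        "name": "QUERY_URI",
        "description": "GCS URI of the query file to run.",
        # Field path syntax follows Dataproc template parameterization (assumed here).
        "fields": ["jobs['query'].prestoJob.queryFileUri"],
        "validation": {
            "regex": {
                "regexes": ["^gs://.+$"],
            },
        },
    }])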
Outputs
All input properties are implicitly available as output properties. Additionally, the WorkflowTemplate resource produces the following output properties:
- Create
Time string - Output only. The time template was created.
- Effective
Labels Dictionary<string, string> - Id string
- The provider-assigned unique ID for this managed resource.
- Pulumi
Labels Dictionary<string, string> - The combination of labels configured directly on the resource and default labels configured on the provider.
- Update
Time string - Output only. The time template was last updated.
- Create
Time string - Output only. The time template was created.
- Effective
Labels map[string]string - Id string
- The provider-assigned unique ID for this managed resource.
- Pulumi
Labels map[string]string - The combination of labels configured directly on the resource and default labels configured on the provider.
- Update
Time string - Output only. The time template was last updated.
- create
Time String - Output only. The time template was created.
- effective
Labels Map<String,String> - id String
- The provider-assigned unique ID for this managed resource.
- pulumi
Labels Map<String,String> - The combination of labels configured directly on the resource and default labels configured on the provider.
- update
Time String - Output only. The time template was last updated.
- create
Time string - Output only. The time template was created.
- effective
Labels {[key: string]: string} - id string
- The provider-assigned unique ID for this managed resource.
- pulumi
Labels {[key: string]: string} - The combination of labels configured directly on the resource and default labels configured on the provider.
- update
Time string - Output only. The time template was last updated.
- create_
time str - Output only. The time template was created.
- effective_
labels Mapping[str, str] - id str
- The provider-assigned unique ID for this managed resource.
- pulumi_
labels Mapping[str, str] - The combination of labels configured directly on the resource and default labels configured on the provider.
- update_
time str - Output only. The time template was last updated.
- create
Time String - Output only. The time template was created.
- effective
Labels Map<String> - id String
- The provider-assigned unique ID for this managed resource.
- pulumi
Labels Map<String> - The combination of labels configured directly on the resource and default labels configured on the provider.
- update
Time String - Output only. The time template was last updated.
Look up Existing WorkflowTemplate Resource
Get an existing WorkflowTemplate resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: WorkflowTemplateState, opts?: CustomResourceOptions): WorkflowTemplate
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
create_time: Optional[str] = None,
dag_timeout: Optional[str] = None,
effective_labels: Optional[Mapping[str, str]] = None,
encryption_config: Optional[WorkflowTemplateEncryptionConfigArgs] = None,
jobs: Optional[Sequence[WorkflowTemplateJobArgs]] = None,
labels: Optional[Mapping[str, str]] = None,
location: Optional[str] = None,
name: Optional[str] = None,
parameters: Optional[Sequence[WorkflowTemplateParameterArgs]] = None,
placement: Optional[WorkflowTemplatePlacementArgs] = None,
project: Optional[str] = None,
pulumi_labels: Optional[Mapping[str, str]] = None,
update_time: Optional[str] = None,
version: Optional[int] = None) -> WorkflowTemplate
func GetWorkflowTemplate(ctx *Context, name string, id IDInput, state *WorkflowTemplateState, opts ...ResourceOption) (*WorkflowTemplate, error)
public static WorkflowTemplate Get(string name, Input<string> id, WorkflowTemplateState? state, CustomResourceOptions? opts = null)
public static WorkflowTemplate get(String name, Output<String> id, WorkflowTemplateState state, CustomResourceOptions options)
resources:
  _:
    type: gcp:dataproc:WorkflowTemplate
    get:
      id: ${id}
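For example, a minimal Python sketch of looking up a template that already exists; the ID below is a placeholder that follows the projects.locations resource name format listed above.

import pulumi
import pulumi_gcp as gcp

# Look up the existing template by its fully qualified resource ID.
existing = gcp.dataproc.WorkflowTemplate.get(
    "existing-template",
    "projects/my-project/locations/us-central1/workflowTemplates/template-example",
)

# The looked-up resource exposes the same outputs, for example its current version.
pulumi.export("templateVersion", existing.version)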
- name
This property is required. - The unique name of the resulting resource.
- id
This property is required. - The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
This property is required. - The unique name of the resulting resource.
- id
This property is required. - The unique provider ID of the resource to lookup.
- name
This property is required. - The unique name of the resulting resource.
- id
This property is required. - The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
This property is required. - The unique name of the resulting resource.
- id
This property is required. - The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
This property is required. - The unique name of the resulting resource.
- id
This property is required. - The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- Create
Time string - Output only. The time template was created.
- Dag
Timeout Changes to this property will trigger replacement.
- Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted.
- Effective
Labels Changes to this property will trigger replacement.
- Encryption
Config Changes to this property will trigger replacement.
Template Encryption Config - Optional. The encryption configuration for the workflow template.
- Jobs
Changes to this property will trigger replacement.
Template Job> - Required. The Directed Acyclic Graph of Jobs to submit.
- Labels Dictionary<string, string>
- Optional. The labels to associate with this template. These labels will be propagated to all jobs and clusters created
by the workflow instance. Label keys must contain 1 to 63 characters, and must conform to RFC
1035. Label values may be empty, but, if present, must contain 1 to 63
characters, and must conform to RFC 1035. No more than 32 labels can be
associated with a template. Note: This field is non-authoritative, and will only manage the labels present in your
configuration. Please refer to the field
effective_labels
for all of the labels present on the resource. - Location
Changes to this property will trigger replacement.
- The location for the resource
- Name
Changes to this property will trigger replacement.
- Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names.
* For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id}
* For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}
- Parameters
Changes to this property will trigger replacement.
Template Parameter> - Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated.
- Placement
Changes to this property will trigger replacement.
Template Placement - Required. WorkflowTemplate scheduling information.
- Project
Changes to this property will trigger replacement.
- The project for the resource
- Pulumi
Labels Dictionary<string, string> - The combination of labels configured directly on the resource and default labels configured on the provider.
- Update
Time string - Output only. The time template was last updated.
- Version
Changes to this property will trigger replacement.
- Output only. The current version of this workflow template.
- Create
Time string - Output only. The time template was created.
- Dag
Timeout Changes to this property will trigger replacement.
- Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted.
- Effective
Labels Changes to this property will trigger replacement.
- Encryption
Config Changes to this property will trigger replacement.
Template Encryption Config Args - Optional. The encryption configuration for the workflow template.
- Jobs
Changes to this property will trigger replacement.
Template Job Args - Required. The Directed Acyclic Graph of Jobs to submit.
- Labels map[string]string
- Optional. The labels to associate with this template. These labels will be propagated to all jobs and clusters created
by the workflow instance. Label keys must contain 1 to 63 characters, and must conform to RFC
1035. Label values may be empty, but, if present, must contain 1 to 63
characters, and must conform to RFC 1035. No more than 32 labels can be
associated with a template. Note: This field is non-authoritative, and will only manage the labels present in your
configuration. Please refer to the field
effective_labels
for all of the labels present on the resource. - Location
Changes to this property will trigger replacement.
- The location for the resource
- Name
Changes to this property will trigger replacement.
- Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names.
* For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id}
* For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}
- Parameters
Changes to this property will trigger replacement.
Template Parameter Args - Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated.
- Placement
Changes to this property will trigger replacement.
Template Placement Args - Required. WorkflowTemplate scheduling information.
- Project
Changes to this property will trigger replacement.
- The project for the resource
- Pulumi
Labels map[string]string - The combination of labels configured directly on the resource and default labels configured on the provider.
- Update
Time string - Output only. The time template was last updated.
- Version
Changes to this property will trigger replacement.
- Output only. The current version of this workflow template.
- create
Time String - Output only. The time template was created.
- dag
Timeout Changes to this property will trigger replacement.
- Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted.
- effective
Labels Changes to this property will trigger replacement.
- encryption
Config Changes to this property will trigger replacement.
Template Encryption Config - Optional. The encryption configuration for the workflow template.
- jobs
Changes to this property will trigger replacement.
Template Job> - Required. The Directed Acyclic Graph of Jobs to submit.
- labels Map<String,String>
- Optional. The labels to associate with this template. These labels will be propagated to all jobs and clusters created
by the workflow instance. Label keys must contain 1 to 63 characters, and must conform to RFC
1035. Label values may be empty, but, if present, must contain 1 to 63
characters, and must conform to RFC 1035. No more than 32 labels can be
associated with a template. Note: This field is non-authoritative, and will only manage the labels present in your
configuration. Please refer to the field
effective_labels
for all of the labels present on the resource. - location
Changes to this property will trigger replacement.
- The location for the resource
- name
Changes to this property will trigger replacement.
- Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names.
* For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id}
* For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}
- parameters
Changes to this property will trigger replacement.
Template Parameter> - Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated.
- placement
Changes to this property will trigger replacement.
Template Placement - Required. WorkflowTemplate scheduling information.
- project
Changes to this property will trigger replacement.
- The project for the resource
- pulumi
Labels Map<String,String> - The combination of labels configured directly on the resource and default labels configured on the provider.
- update
Time String - Output only. The time template was last updated.
- version
Changes to this property will trigger replacement.
- Output only. The current version of this workflow template.
- create
Time string - Output only. The time template was created.
- dag
Timeout Changes to this property will trigger replacement.
- Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted.
- effective
Labels Changes to this property will trigger replacement.
- encryption
Config Changes to this property will trigger replacement.
Template Encryption Config - Optional. The encryption configuration for the workflow template.
- jobs
Changes to this property will trigger replacement.
Template Job[] - Required. The Directed Acyclic Graph of Jobs to submit.
- labels {[key: string]: string}
- Optional. The labels to associate with this template. These labels will be propagated to all jobs and clusters created
by the workflow instance. Label keys must contain 1 to 63 characters, and must conform to RFC
1035. Label values may be empty, but, if present, must contain 1 to 63
characters, and must conform to RFC 1035. No more than 32 labels can be
associated with a template. Note: This field is non-authoritative, and will only manage the labels present in your
configuration. Please refer to the field
effective_labels
for all of the labels present on the resource. - location
Changes to this property will trigger replacement.
- The location for the resource
- name
Changes to this property will trigger replacement.
- Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names.
* For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id}
* For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}
- parameters
Changes to this property will trigger replacement.
Template Parameter[] - Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated.
- placement
Changes to this property will trigger replacement.
Template Placement - Required. WorkflowTemplate scheduling information.
- project
Changes to this property will trigger replacement.
- The project for the resource
- pulumi
Labels {[key: string]: string} - The combination of labels configured directly on the resource and default labels configured on the provider.
- update
Time string - Output only. The time template was last updated.
- version
Changes to this property will trigger replacement.
- Output only. The current version of this workflow template.
- create_
time str - Output only. The time template was created.
- dag_
timeout Changes to this property will trigger replacement.
- Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted.
- effective_
labels Changes to this property will trigger replacement.
- encryption_
config Changes to this property will trigger replacement.
Template Encryption Config Args - Optional. The encryption configuration for the workflow template.
- jobs
Changes to this property will trigger replacement.
Template Job Args] - Required. The Directed Acyclic Graph of Jobs to submit.
- labels Mapping[str, str]
- Optional. The labels to associate with this template. These labels will be propagated to all jobs and clusters created
by the workflow instance. Label keys must contain 1 to 63 characters, and must conform to RFC
1035. Label values may be empty, but, if present, must contain 1 to 63
characters, and must conform to RFC 1035. No more than 32 labels can be
associated with a template. Note: This field is non-authoritative, and will only manage the labels present in your
configuration. Please refer to the field
effective_labels
for all of the labels present on the resource. - location
Changes to this property will trigger replacement.
- The location for the resource
- name
Changes to this property will trigger replacement.
- Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names.
* For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id}
* For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}
- parameters
Changes to this property will trigger replacement.
Template Parameter Args] - Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated.
- placement
Changes to this property will trigger replacement.
Template Placement Args - Required. WorkflowTemplate scheduling information.
- project
Changes to this property will trigger replacement.
- The project for the resource
- pulumi_
labels Mapping[str, str] - The combination of labels configured directly on the resource and default labels configured on the provider.
- update_
time str - Output only. The time template was last updated.
- version
Changes to this property will trigger replacement.
- Output only. The current version of this workflow template.
- create
Time String - Output only. The time template was created.
- dag
Timeout Changes to this property will trigger replacement.
- Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted.
- effective
Labels Changes to this property will trigger replacement.
- encryption
Config Changes to this property will trigger replacement.
- Optional. The encryption configuration for the workflow template.
- jobs
Changes to this property will trigger replacement.
- Required. The Directed Acyclic Graph of Jobs to submit.
- labels Map<String>
- Optional. The labels to associate with this template. These labels will be propagated to all jobs and clusters created
by the workflow instance. Label keys must contain 1 to 63 characters, and must conform to RFC
1035. Label values may be empty, but, if present, must contain 1 to 63
characters, and must conform to RFC 1035. No more than 32 labels can be
associated with a template. Note: This field is non-authoritative, and will only manage the labels present in your
configuration. Please refer to the field
effective_labels
for all of the labels present on the resource. - location
Changes to this property will trigger replacement.
- The location for the resource
- name
Changes to this property will trigger replacement.
- Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names.
* For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id}
* For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}
- parameters
Changes to this property will trigger replacement.
- Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated.
- placement
Changes to this property will trigger replacement.
- Required. WorkflowTemplate scheduling information.
- project
Changes to this property will trigger replacement.
- The project for the resource
- pulumi
Labels Map<String> - The combination of labels configured directly on the resource and default labels configured on the provider.
- update
Time String - Output only. The time template was last updated.
- version
Changes to this property will trigger replacement.
- Output only. The current version of this workflow template.
Supporting Types
WorkflowTemplateEncryptionConfig, WorkflowTemplateEncryptionConfigArgs
- Kms
Key Changes to this property will trigger replacement.
- Optional. The Cloud KMS key name to use for encryption.
- Kms
Key Changes to this property will trigger replacement.
- Optional. The Cloud KMS key name to use for encryption.
- kms
Key Changes to this property will trigger replacement.
- Optional. The Cloud KMS key name to use for encryption.
- kms
Key Changes to this property will trigger replacement.
- Optional. The Cloud KMS key name to use for encryption.
- kms_
key Changes to this property will trigger replacement.
- Optional. The Cloud KMS key name to use for encryption.
- kms
Key Changes to this property will trigger replacement.
- Optional. The Cloud KMS key name to use for encryption.
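Note that this template-level kmsKey is distinct from the cluster-level gcePdKmsKeyName under placement.managedCluster.config.encryptionConfig. A brief sketch of where each setting lives, using placeholder key names:

# Template-level key (encryptionConfig.kmsKey on the workflow template itself).
template_encryption = {
    "kms_key": "projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/template-key",
}

# Cluster-level key used for persistent disk encryption on the managed cluster's instances
# (placement.managedCluster.config.encryptionConfig.gcePdKmsKeyName).
cluster_disk_encryption = {
    "gce_pd_kms_key_name": "projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/disk-key",
}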
WorkflowTemplateJob, WorkflowTemplateJobArgs
- Step
Id This property is required. Changes to this property will trigger replacement.
- Required. The step id. The id must be unique among all jobs within the template. The step id is used as prefix for job id, as job goog-dataproc-workflow-step-id label, and in the prerequisiteStepIds field from other steps. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters. - Hadoop
Job Changes to this property will trigger replacement.
Template Job Hadoop Job - Job is a Hadoop job.
- Hive
Job Changes to this property will trigger replacement.
Template Job Hive Job - Job is a Hive job.
- Labels
Changes to this property will trigger replacement.
- The labels to associate with this job. Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: {0,63} No more than 32 labels can be associated with a given job.
- Pig
Job Changes to this property will trigger replacement.
Template Job Pig Job - Job is a Pig job.
- Prerequisite
Step Ids Changes to this property will trigger replacement.
- The optional list of prerequisite job step_ids. If not specified, the job will start at the beginning of workflow.
- Presto
Job Changes to this property will trigger replacement.
Template Job Presto Job - Job is a Presto job.
- Pyspark
Job Changes to this property will trigger replacement.
Template Job Pyspark Job - Job is a PySpark job.
- Scheduling
Changes to this property will trigger replacement.
Template Job Scheduling - Job scheduling configuration.
- Spark
Job Changes to this property will trigger replacement.
Template Job Spark Job - Job is a Spark job.
- Spark
RJob Changes to this property will trigger replacement.
Template Job Spark RJob - Job is a SparkR job.
- Spark
Sql Job Changes to this property will trigger replacement.
Template Job Spark Sql Job - Job is a SparkSql job.
- Step
Id This property is required. Changes to this property will trigger replacement.
- Required. The step id. The id must be unique among all jobs within the template. The step id is used as prefix for job id, as job goog-dataproc-workflow-step-id label, and in the prerequisiteStepIds field from other steps. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters. - Hadoop
Job Changes to this property will trigger replacement.
Template Job Hadoop Job - Job is a Hadoop job.
- Hive
Job Changes to this property will trigger replacement.
Template Job Hive Job - Job is a Hive job.
- Labels
Changes to this property will trigger replacement.
- The labels to associate with this job. Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: {0,63} No more than 32 labels can be associated with a given job.
- Pig
Job Changes to this property will trigger replacement.
Template Job Pig Job - Job is a Pig job.
- Prerequisite
Step Ids Changes to this property will trigger replacement.
- The optional list of prerequisite job step_ids. If not specified, the job will start at the beginning of workflow.
- Presto
Job Changes to this property will trigger replacement.
Template Job Presto Job - Job is a Presto job.
- Pyspark
Job Changes to this property will trigger replacement.
Template Job Pyspark Job - Job is a PySpark job.
- Scheduling
Changes to this property will trigger replacement.
Template Job Scheduling - Job scheduling configuration.
- Spark
Job Changes to this property will trigger replacement.
Template Job Spark Job - Job is a Spark job.
- Spark
RJob Changes to this property will trigger replacement.
Template Job Spark RJob - Job is a SparkR job.
- Spark
Sql Job Changes to this property will trigger replacement.
Template Job Spark Sql Job - Job is a SparkSql job.
- step
Id This property is required. Changes to this property will trigger replacement.
- Required. The step id. The id must be unique among all jobs within the template. The step id is used as prefix for job id, as job goog-dataproc-workflow-step-id label, and in the prerequisiteStepIds field from other steps. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters. - hadoop
Job Changes to this property will trigger replacement.
Template Job Hadoop Job - Job is a Hadoop job.
- hive
Job Changes to this property will trigger replacement.
Template Job Hive Job - Job is a Hive job.
- labels
Changes to this property will trigger replacement.
- The labels to associate with this job. Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: {0,63} No more than 32 labels can be associated with a given job.
- pig
Job Changes to this property will trigger replacement.
Template Job Pig Job - Job is a Pig job.
- prerequisite
Step Ids Changes to this property will trigger replacement.
- The optional list of prerequisite job step_ids. If not specified, the job will start at the beginning of workflow.
- presto
Job Changes to this property will trigger replacement.
Template Job Presto Job - Job is a Presto job.
- pyspark
Job Changes to this property will trigger replacement.
Template Job Pyspark Job - Job is a PySpark job.
- scheduling
Changes to this property will trigger replacement.
Template Job Scheduling - Job scheduling configuration.
- spark
Job Changes to this property will trigger replacement.
Template Job Spark Job - Job is a Spark job.
- spark
RJob Changes to this property will trigger replacement.
Template Job Spark RJob - Job is a SparkR job.
- spark
Sql Job Changes to this property will trigger replacement.
Template Job Spark Sql Job - Job is a SparkSql job.
- step
Id This property is required. Changes to this property will trigger replacement.
- Required. The step id. The id must be unique among all jobs within the template. The step id is used as prefix for job id, as job goog-dataproc-workflow-step-id label, and in the prerequisiteStepIds field from other steps. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters. - hadoop
Job Changes to this property will trigger replacement.
Template Job Hadoop Job - Job is a Hadoop job.
- hive
Job Changes to this property will trigger replacement.
Template Job Hive Job - Job is a Hive job.
- labels
Changes to this property will trigger replacement.
- The labels to associate with this job. Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: {0,63} No more than 32 labels can be associated with a given job.
- pig
Job Changes to this property will trigger replacement.
Template Job Pig Job - Job is a Pig job.
- prerequisite
Step Ids Changes to this property will trigger replacement.
- The optional list of prerequisite job step_ids. If not specified, the job will start at the beginning of workflow.
- presto
Job Changes to this property will trigger replacement.
Template Job Presto Job - Job is a Presto job.
- pyspark
Job Changes to this property will trigger replacement.
Template Job Pyspark Job - Job is a PySpark job.
- scheduling
Changes to this property will trigger replacement.
Template Job Scheduling - Job scheduling configuration.
- spark
Job Changes to this property will trigger replacement.
Template Job Spark Job - Job is a Spark job.
- spark
RJob Changes to this property will trigger replacement.
Template Job Spark RJob - Job is a SparkR job.
- spark
Sql Job Changes to this property will trigger replacement.
Template Job Spark Sql Job - Job is a SparkSql job.
- step_
id This property is required. Changes to this property will trigger replacement.
- Required. The step id. The id must be unique among all jobs within the template. The step id is used as prefix for job id, as job goog-dataproc-workflow-step-id label, and in the prerequisiteStepIds field from other steps. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters. - hadoop_
job Changes to this property will trigger replacement.
Template Job Hadoop Job - Job is a Hadoop job.
- hive_
job Changes to this property will trigger replacement.
Template Job Hive Job - Job is a Hive job.
- labels
Changes to this property will trigger replacement.
- The labels to associate with this job. Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: {0,63} No more than 32 labels can be associated with a given job.
- pig_
job Changes to this property will trigger replacement.
Template Job Pig Job - Job is a Pig job.
- prerequisite_
step_ ids Changes to this property will trigger replacement.
- The optional list of prerequisite job step_ids. If not specified, the job will start at the beginning of workflow.
- presto_
job Changes to this property will trigger replacement.
Template Job Presto Job - Job is a Presto job.
- pyspark_
job Changes to this property will trigger replacement.
Template Job Pyspark Job - Job is a PySpark job.
- scheduling
Changes to this property will trigger replacement.
Template Job Scheduling - Job scheduling configuration.
- spark_
job Changes to this property will trigger replacement.
Template Job Spark Job - Job is a Spark job.
- spark_
r_ job Changes to this property will trigger replacement.
Template Job Spark RJob - Job is a SparkR job.
- spark_
sql_ job Changes to this property will trigger replacement.
Template Job Spark Sql Job - Job is a SparkSql job.
- step
Id This property is required. Changes to this property will trigger replacement.
- Required. The step id. The id must be unique among all jobs within the template. The step id is used as prefix for job id, as job goog-dataproc-workflow-step-id label, and in prerequisiteStepIds field from other steps. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters.
- hadoop
Job Changes to this property will trigger replacement.
- Job is a Hadoop job.
- hive
Job Changes to this property will trigger replacement.
- Job is a Hive job.
- labels
Changes to this property will trigger replacement.
- The labels to associate with this job. Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: {0,63} No more than 32 labels can be associated with a given job.
- pig
Job Changes to this property will trigger replacement.
- Job is a Pig job.
- prerequisite
Step Ids Changes to this property will trigger replacement.
- The optional list of prerequisite job step_ids. If not specified, the job will start at the beginning of workflow.
- presto
Job Changes to this property will trigger replacement.
- Job is a Presto job.
- pyspark
Job Changes to this property will trigger replacement.
- Job is a PySpark job.
- scheduling
Changes to this property will trigger replacement.
- Job scheduling configuration.
- spark
Job Changes to this property will trigger replacement.
- Job is a Spark job.
- spark
RJob Changes to this property will trigger replacement.
- Job is a SparkR job.
- spark
Sql Job Changes to this property will trigger replacement.
- Job is a SparkSql job.
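The job-level fields above combine as in this minimal TypeScript sketch of one entry for the template's jobs list; the step ids, label, retry limit, and main class are hypothetical.
const loadStep = {
    stepId: "load-data",
    prerequisiteStepIds: ["extract-data"], // must reference another step's stepId
    labels: { team: "analytics" },
    scheduling: { maxFailuresPerHour: 3 },
    sparkJob: { mainClass: "com.example.Load" }, // exactly one job type per step
};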
WorkflowTemplateJobHadoopJob, WorkflowTemplateJobHadoopJobArgs
- Archive
Uris Changes to this property will trigger replacement.
- HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.
- Args
Changes to this property will trigger replacement.
- The arguments to pass to the driver. Do not include arguments, such as
-libjars
or -Dfoo=bar
, that can be set as job properties, since a collision may occur that causes an incorrect job submission. - File
Uris Changes to this property will trigger replacement.
- HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
- Jar
File Uris Changes to this property will trigger replacement.
- Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.
- Logging
Config Changes to this property will trigger replacement.
Template Job Hadoop Job Logging Config - The runtime log config for job execution.
- Main
Class Changes to this property will trigger replacement.
- The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in
jar_file_uris
. - Main
Jar File Uri Changes to this property will trigger replacement.
- The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
- Properties
Changes to this property will trigger replacement.
- A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.
- Archive
Uris Changes to this property will trigger replacement.
- HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.
- Args
Changes to this property will trigger replacement.
- The arguments to pass to the driver. Do not include arguments, such as
-libjars
or -Dfoo=bar
, that can be set as job properties, since a collision may occur that causes an incorrect job submission. - File
Uris Changes to this property will trigger replacement.
- HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
- Jar
File Uris Changes to this property will trigger replacement.
- Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.
- Logging
Config Changes to this property will trigger replacement.
Template Job Hadoop Job Logging Config - The runtime log config for job execution.
- Main
Class Changes to this property will trigger replacement.
- The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in
jar_file_uris
. - Main
Jar File Uri Changes to this property will trigger replacement.
- The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
- Properties
Changes to this property will trigger replacement.
- A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.
- archive
Uris Changes to this property will trigger replacement.
- HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.
- args
Changes to this property will trigger replacement.
- The arguments to pass to the driver. Do not include arguments, such as
-libjars
or -Dfoo=bar
, that can be set as job properties, since a collision may occur that causes an incorrect job submission. - file
Uris Changes to this property will trigger replacement.
- HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
- jar
File Uris Changes to this property will trigger replacement.
- Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.
- logging
Config Changes to this property will trigger replacement.
Template Job Hadoop Job Logging Config - The runtime log config for job execution.
- main
Class Changes to this property will trigger replacement.
- The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in
jar_file_uris
. - main
Jar File Uri Changes to this property will trigger replacement.
- The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
- properties
Changes to this property will trigger replacement.
- A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.
- archive
Uris Changes to this property will trigger replacement.
- HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.
- args
Changes to this property will trigger replacement.
- The arguments to pass to the driver. Do not include arguments, such as
-libjars
or -Dfoo=bar
, that can be set as job properties, since a collision may occur that causes an incorrect job submission. - file
Uris Changes to this property will trigger replacement.
- HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
- jar
File Uris Changes to this property will trigger replacement.
- Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.
- logging
Config Changes to this property will trigger replacement.
Template Job Hadoop Job Logging Config - The runtime log config for job execution.
- main
Class Changes to this property will trigger replacement.
- The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in
jar_file_uris
. - main
Jar File Uri Changes to this property will trigger replacement.
- The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
- properties
Changes to this property will trigger replacement.
- A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.
- archive_
uris Changes to this property will trigger replacement.
- HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.
- args
Changes to this property will trigger replacement.
- The arguments to pass to the driver. Do not include arguments, such as
-libjars
or -Dfoo=bar
, that can be set as job properties, since a collision may occur that causes an incorrect job submission. - file_
uris Changes to this property will trigger replacement.
- HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
- jar_
file_ uris Changes to this property will trigger replacement.
- Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.
- logging_
config Changes to this property will trigger replacement.
Template Job Hadoop Job Logging Config - The runtime log config for job execution.
- main_
class Changes to this property will trigger replacement.
- The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in
jar_file_uris
. - main_
jar_ file_ uri Changes to this property will trigger replacement.
- The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
- properties
Changes to this property will trigger replacement.
- A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.
- archive
Uris Changes to this property will trigger replacement.
- HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.
- args
Changes to this property will trigger replacement.
- The arguments to pass to the driver. Do not include arguments, such as
-libjars
or -Dfoo=bar
, that can be set as job properties, since a collision may occur that causes an incorrect job submission. - file
Uris Changes to this property will trigger replacement.
- HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
- jar
File Uris Changes to this property will trigger replacement.
- Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.
- logging
Config Changes to this property will trigger replacement.
- The runtime log config for job execution.
- main
Class Changes to this property will trigger replacement.
- The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in
jar_file_uris
. - main
Jar File Uri Changes to this property will trigger replacement.
- The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
- properties
Changes to this property will trigger replacement.
- A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.
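As a reference point, here is a minimal hadoopJob sketch in TypeScript using the fields above; the bucket paths, jars, and reducer count are hypothetical, and mainJarFileUri is used instead of mainClass as the driver entry point.
const hadoopJob = {
    mainJarFileUri: "gs://my-bucket/jars/wordcount.jar",
    args: ["gs://my-bucket/input/", "gs://my-bucket/output/"],
    jarFileUris: ["gs://my-bucket/jars/helpers.jar"],
    archiveUris: ["gs://my-bucket/conf/site-conf.tar.gz"],
    properties: { "mapreduce.job.reduces": "4" }, // property values are strings
    loggingConfig: { driverLogLevels: { root: "INFO" } },
};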
WorkflowTemplateJobHadoopJobLoggingConfig, WorkflowTemplateJobHadoopJobLoggingConfigArgs
- Driver
Log Levels Changes to this property will trigger replacement.
- The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
- Driver
Log Levels Changes to this property will trigger replacement.
- The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
- driver
Log Levels Changes to this property will trigger replacement.
- The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
- driver
Log Levels Changes to this property will trigger replacement.
- The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
- driver_
log_ levels Changes to this property will trigger replacement.
- The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
- driver
Log Levels Changes to this property will trigger replacement.
- The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
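A minimal driverLogLevels sketch mirroring the examples in the description; the package names are illustrative.
const hadoopJobLoggingConfig = {
    driverLogLevels: {
        "root": "INFO",
        "org.apache.hadoop": "DEBUG",
        "com.example": "FATAL",
    },
};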
WorkflowTemplateJobHiveJob, WorkflowTemplateJobHiveJobArgs
- Continue
On Failure Changes to this property will trigger replacement.
- Whether to continue executing queries if a query fails. The default value is
false
. Setting to true
can be useful when executing independent parallel queries. - Jar
File Uris Changes to this property will trigger replacement.
- HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs.
- Properties
Changes to this property will trigger replacement.
- A mapping of property names and values, used to configure Hive. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes in user code.
- Query
File Uri Changes to this property will trigger replacement.
- The HCFS URI of the script that contains Hive queries.
- Query
List Changes to this property will trigger replacement.
Template Job Hive Job Query List - A list of queries.
- Script
Variables Changes to this property will trigger replacement.
- Mapping of query variable names to values (equivalent to the Hive command:
SET name="value";
).
- Continue
On Failure Changes to this property will trigger replacement.
- Whether to continue executing queries if a query fails. The default value is
false
. Setting to true
can be useful when executing independent parallel queries. - Jar
File Uris Changes to this property will trigger replacement.
- HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs.
- Properties
Changes to this property will trigger replacement.
- A mapping of property names and values, used to configure Hive. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes in user code.
- Query
File Uri Changes to this property will trigger replacement.
- The HCFS URI of the script that contains Hive queries.
- Query
List Changes to this property will trigger replacement.
Template Job Hive Job Query List - A list of queries.
- Script
Variables Changes to this property will trigger replacement.
- Mapping of query variable names to values (equivalent to the Hive command:
SET name="value";
).
- continue
On Failure Changes to this property will trigger replacement.
- Whether to continue executing queries if a query fails. The default value is
false
. Setting to true
can be useful when executing independent parallel queries. - jar
File Uris Changes to this property will trigger replacement.
- HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs.
- properties
Changes to this property will trigger replacement.
- A mapping of property names and values, used to configure Hive. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes in user code.
- query
File Uri Changes to this property will trigger replacement.
- The HCFS URI of the script that contains Hive queries.
- query
List Changes to this property will trigger replacement.
Template Job Hive Job Query List - A list of queries.
- script
Variables Changes to this property will trigger replacement.
- Mapping of query variable names to values (equivalent to the Hive command:
SET name="value";
).
- continue
On Failure Changes to this property will trigger replacement.
- Whether to continue executing queries if a query fails. The default value is
false
. Setting to true
can be useful when executing independent parallel queries. - jar
File Uris Changes to this property will trigger replacement.
- HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs.
- properties
Changes to this property will trigger replacement.
- A mapping of property names and values, used to configure Hive. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes in user code.
- query
File Uri Changes to this property will trigger replacement.
- The HCFS URI of the script that contains Hive queries.
- query
List Changes to this property will trigger replacement.
Template Job Hive Job Query List - A list of queries.
- script
Variables Changes to this property will trigger replacement.
- Mapping of query variable names to values (equivalent to the Hive command:
SET name="value";
).
- continue_
on_ failure Changes to this property will trigger replacement.
- Whether to continue executing queries if a query fails. The default value is
false
. Setting to true
can be useful when executing independent parallel queries. - jar_
file_ uris Changes to this property will trigger replacement.
- HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs.
- properties
Changes to this property will trigger replacement.
- A mapping of property names and values, used to configure Hive. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes in user code.
- query_
file_ uri Changes to this property will trigger replacement.
- The HCFS URI of the script that contains Hive queries.
- query_
list Changes to this property will trigger replacement.
Template Job Hive Job Query List - A list of queries.
- script_
variables Changes to this property will trigger replacement.
- Mapping of query variable names to values (equivalent to the Hive command:
SET name="value";
).
- continue
On Failure Changes to this property will trigger replacement.
- Whether to continue executing queries if a query fails. The default value is
false
. Setting to true
can be useful when executing independent parallel queries. - jar
File Uris Changes to this property will trigger replacement.
- HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs.
- properties
Changes to this property will trigger replacement.
- A mapping of property names and values, used to configure Hive. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes in user code.
- query
File Uri Changes to this property will trigger replacement.
- The HCFS URI of the script that contains Hive queries.
- query
List Changes to this property will trigger replacement.
- A list of queries.
- script
Variables Changes to this property will trigger replacement.
- Mapping of query variable names to values (equivalent to the Hive command:
SET name="value";
).
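A minimal hiveJob sketch; the script URI, variable, and jar are hypothetical, and queryFileUri is used here rather than an inline queryList.
const hiveJob = {
    queryFileUri: "gs://my-bucket/hive/daily_report.hql",
    scriptVariables: { run_date: "2024-01-01" }, // equivalent to: SET run_date="2024-01-01";
    continueOnFailure: false,
    jarFileUris: ["gs://my-bucket/jars/custom-serde.jar"],
};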
WorkflowTemplateJobHiveJobQueryList, WorkflowTemplateJobHiveJobQueryListArgs
- Queries
This property is required. Changes to this property will trigger replacement.
- Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": } }
- Queries
This property is required. Changes to this property will trigger replacement.
- Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": } }
- queries
This property is required. Changes to this property will trigger replacement.
- Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": } }
- queries
This property is required. Changes to this property will trigger replacement.
- Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": } }
- queries
This property is required. Changes to this property will trigger replacement.
- Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": } }
- queries
This property is required. Changes to this property will trigger replacement.
- Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": } }
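When the queries are short enough to inline, a queryList can be supplied instead of queryFileUri; here is a minimal sketch with placeholder statements.
const hiveJobInline = {
    queryList: {
        queries: [
            "CREATE TABLE IF NOT EXISTS logs (line STRING)",
            "SELECT COUNT(*) FROM logs",
        ],
    },
};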
WorkflowTemplateJobPigJob, WorkflowTemplateJobPigJobArgs
- Continue
On Failure Changes to this property will trigger replacement.
- Whether to continue executing queries if a query fails. The default value is
false
. Setting to true
can be useful when executing independent parallel queries. - Jar
File Uris Changes to this property will trigger replacement.
- HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
- Logging
Config Changes to this property will trigger replacement.
Template Job Pig Job Logging Config - The runtime log config for job execution.
- Properties
Changes to this property will trigger replacement.
- A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code.
- Query
File Uri Changes to this property will trigger replacement.
- The HCFS URI of the script that contains the Pig queries.
- Query
List Changes to this property will trigger replacement.
Template Job Pig Job Query List - A list of queries.
- Script
Variables Changes to this property will trigger replacement.
- Mapping of query variable names to values (equivalent to the Pig command:
name=
).
- Continue
On Failure Changes to this property will trigger replacement.
- Whether to continue executing queries if a query fails. The default value is
false
. Setting to true
can be useful when executing independent parallel queries. - Jar
File Uris Changes to this property will trigger replacement.
- HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
- Logging
Config Changes to this property will trigger replacement.
Template Job Pig Job Logging Config - The runtime log config for job execution.
- Properties
Changes to this property will trigger replacement.
- A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code.
- Query
File Uri Changes to this property will trigger replacement.
- The HCFS URI of the script that contains the Pig queries.
- Query
List Changes to this property will trigger replacement.
Template Job Pig Job Query List - A list of queries.
- Script
Variables Changes to this property will trigger replacement.
- Mapping of query variable names to values (equivalent to the Pig command:
name=
).
- continue
On Failure Changes to this property will trigger replacement.
- Whether to continue executing queries if a query fails. The default value is
false
. Setting to true
can be useful when executing independent parallel queries. - jar
File Uris Changes to this property will trigger replacement.
- HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
- logging
Config Changes to this property will trigger replacement.
Template Job Pig Job Logging Config - The runtime log config for job execution.
- properties
Changes to this property will trigger replacement.
- A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code.
- query
File Uri Changes to this property will trigger replacement.
- The HCFS URI of the script that contains the Pig queries.
- query
List Changes to this property will trigger replacement.
Template Job Pig Job Query List - A list of queries.
- script
Variables Changes to this property will trigger replacement.
- Mapping of query variable names to values (equivalent to the Pig command:
name=
).
- continue
On Failure Changes to this property will trigger replacement.
- Whether to continue executing queries if a query fails. The default value is
false
. Setting to true
can be useful when executing independent parallel queries. - jar
File Uris Changes to this property will trigger replacement.
- HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
- logging
Config Changes to this property will trigger replacement.
Template Job Pig Job Logging Config - The runtime log config for job execution.
- properties
Changes to this property will trigger replacement.
- A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code.
- query
File Uri Changes to this property will trigger replacement.
- The HCFS URI of the script that contains the Pig queries.
- query
List Changes to this property will trigger replacement.
Template Job Pig Job Query List - A list of queries.
- script
Variables Changes to this property will trigger replacement.
- Mapping of query variable names to values (equivalent to the Pig command:
name=
).
- continue_
on_ failure Changes to this property will trigger replacement.
- Whether to continue executing queries if a query fails. The default value is
false
. Setting to true
can be useful when executing independent parallel queries. - jar_
file_ uris Changes to this property will trigger replacement.
- HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
- logging_
config Changes to this property will trigger replacement.
Template Job Pig Job Logging Config - The runtime log config for job execution.
- properties
Changes to this property will trigger replacement.
- A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code.
- query_
file_ uri Changes to this property will trigger replacement.
- The HCFS URI of the script that contains the Pig queries.
- query_
list Changes to this property will trigger replacement.
Template Job Pig Job Query List - A list of queries.
- script_
variables Changes to this property will trigger replacement.
- Mapping of query variable names to values (equivalent to the Pig command:
name=
).
- continue
On Failure Changes to this property will trigger replacement.
- Whether to continue executing queries if a query fails. The default value is
false
. Setting to true
can be useful when executing independent parallel queries. - jar
File Uris Changes to this property will trigger replacement.
- HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
- logging
Config Changes to this property will trigger replacement.
- The runtime log config for job execution.
- properties
Changes to this property will trigger replacement.
- A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code.
- query
File Uri Changes to this property will trigger replacement.
- The HCFS URI of the script that contains the Pig queries.
- query
List Changes to this property will trigger replacement.
- A list of queries.
- script
Variables Changes to this property will trigger replacement.
- Mapping of query variable names to values (equivalent to the Pig command:
name=
).
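A minimal pigJob sketch; the script URI, variable, and UDF jar are hypothetical.
const pigJob = {
    queryFileUri: "gs://my-bucket/pig/transform.pig",
    scriptVariables: { input_dir: "gs://my-bucket/raw/" }, // equivalent to the Pig command: input_dir=gs://my-bucket/raw/
    jarFileUris: ["gs://my-bucket/jars/pig-udfs.jar"],
    loggingConfig: { driverLogLevels: { root: "WARN" } },
};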
WorkflowTemplateJobPigJobLoggingConfig, WorkflowTemplateJobPigJobLoggingConfigArgs
- Driver
Log Levels Changes to this property will trigger replacement.
- The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
- Driver
Log Levels Changes to this property will trigger replacement.
- The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
- driver
Log Levels Changes to this property will trigger replacement.
- The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
- driver
Log Levels Changes to this property will trigger replacement.
- The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
- driver_
log_ levels Changes to this property will trigger replacement.
- The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
- driver
Log Levels Changes to this property will trigger replacement.
- The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
WorkflowTemplateJobPigJobQueryList, WorkflowTemplateJobPigJobQueryListArgs
- Queries
This property is required. Changes to this property will trigger replacement.
- Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": } }
- Queries
This property is required. Changes to this property will trigger replacement.
- Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": } }
- queries
This property is required. Changes to this property will trigger replacement.
- Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": } }
- queries
This property is required. Changes to this property will trigger replacement.
- Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": } }
- queries
This property is required. Changes to this property will trigger replacement.
- Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": } }
- queries
This property is required. Changes to this property will trigger replacement.
- Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": } }
WorkflowTemplateJobPrestoJob, WorkflowTemplateJobPrestoJobArgs
- Client Tags Changes to this property will trigger replacement.
- Presto client tags to attach to this query
- Continue
On Failure Changes to this property will trigger replacement.
- Whether to continue executing queries if a query fails. The default value is
false
. Setting to true
can be useful when executing independent parallel queries. - Logging
Config Changes to this property will trigger replacement.
Template Job Presto Job Logging Config - The runtime log config for job execution.
- Output
Format Changes to this property will trigger replacement.
- The format in which query output will be displayed. See the Presto documentation for supported output formats
- Properties
Changes to this property will trigger replacement.
- A mapping of property names to values. Used to set Presto session properties (https://prestodb.io/docs/current/sql/set-session.html). Equivalent to using the --session flag in the Presto CLI
- Query
File Uri Changes to this property will trigger replacement.
- The HCFS URI of the script that contains SQL queries.
- Query
List Changes to this property will trigger replacement.
Template Job Presto Job Query List - A list of queries.
- Client Tags Changes to this property will trigger replacement.
- Presto client tags to attach to this query
- Continue
On Failure Changes to this property will trigger replacement.
- Whether to continue executing queries if a query fails. The default value is
false
. Setting to true
can be useful when executing independent parallel queries. - Logging
Config Changes to this property will trigger replacement.
Template Job Presto Job Logging Config - The runtime log config for job execution.
- Output
Format Changes to this property will trigger replacement.
- The format in which query output will be displayed. See the Presto documentation for supported output formats
- Properties
Changes to this property will trigger replacement.
- A mapping of property names to values. Used to set Presto session properties (https://prestodb.io/docs/current/sql/set-session.html). Equivalent to using the --session flag in the Presto CLI
- Query
File Uri Changes to this property will trigger replacement.
- The HCFS URI of the script that contains SQL queries.
- Query
List Changes to this property will trigger replacement.
Template Job Presto Job Query List - A list of queries.
- client Tags Changes to this property will trigger replacement.
- Presto client tags to attach to this query
- continue
On Failure Changes to this property will trigger replacement.
- Whether to continue executing queries if a query fails. The default value is
false
. Setting to true
can be useful when executing independent parallel queries. - logging
Config Changes to this property will trigger replacement.
Template Job Presto Job Logging Config - The runtime log config for job execution.
- output
Format Changes to this property will trigger replacement.
- The format in which query output will be displayed. See the Presto documentation for supported output formats
- properties
Changes to this property will trigger replacement.
- A mapping of property names to values. Used to set Presto session properties (https://prestodb.io/docs/current/sql/set-session.html). Equivalent to using the --session flag in the Presto CLI
- query
File Uri Changes to this property will trigger replacement.
- The HCFS URI of the script that contains SQL queries.
- query
List Changes to this property will trigger replacement.
Template Job Presto Job Query List - A list of queries.
- client Tags Changes to this property will trigger replacement.
- Presto client tags to attach to this query
- continue
On Failure Changes to this property will trigger replacement.
- Whether to continue executing queries if a query fails. The default value is
false
. Setting to true
can be useful when executing independent parallel queries. - logging
Config Changes to this property will trigger replacement.
Template Job Presto Job Logging Config - The runtime log config for job execution.
- output
Format Changes to this property will trigger replacement.
- The format in which query output will be displayed. See the Presto documentation for supported output formats
- properties
Changes to this property will trigger replacement.
- A mapping of property names to values. Used to set Presto session properties (https://prestodb.io/docs/current/sql/set-session.html). Equivalent to using the --session flag in the Presto CLI
- query
File Uri Changes to this property will trigger replacement.
- The HCFS URI of the script that contains SQL queries.
- query
List Changes to this property will trigger replacement.
Template Job Presto Job Query List - A list of queries.
- client_ tags Changes to this property will trigger replacement.
- Presto client tags to attach to this query
- continue_
on_ failure Changes to this property will trigger replacement.
- Whether to continue executing queries if a query fails. The default value is
false
. Setting to true
can be useful when executing independent parallel queries. - logging_
config Changes to this property will trigger replacement.
Template Job Presto Job Logging Config - The runtime log config for job execution.
- output_
format Changes to this property will trigger replacement.
- The format in which query output will be displayed. See the Presto documentation for supported output formats
- properties
Changes to this property will trigger replacement.
- A mapping of property names to values. Used to set Presto session properties (https://prestodb.io/docs/current/sql/set-session.html). Equivalent to using the --session flag in the Presto CLI
- query_
file_ uri Changes to this property will trigger replacement.
- The HCFS URI of the script that contains SQL queries.
- query_
list Changes to this property will trigger replacement.
Template Job Presto Job Query List - A list of queries.
- client Tags Changes to this property will trigger replacement.
- Presto client tags to attach to this query
- continue
On Failure Changes to this property will trigger replacement.
- Whether to continue executing queries if a query fails. The default value is
false
. Setting to true
can be useful when executing independent parallel queries. - logging
Config Changes to this property will trigger replacement.
- The runtime log config for job execution.
- output
Format Changes to this property will trigger replacement.
- The format in which query output will be displayed. See the Presto documentation for supported output formats
- properties
Changes to this property will trigger replacement.
- A mapping of property names to values. Used to set Presto session properties (https://prestodb.io/docs/current/sql/set-session.html). Equivalent to using the --session flag in the Presto CLI
- query
File Uri Changes to this property will trigger replacement.
- The HCFS URI of the script that contains SQL queries.
- query
List Changes to this property will trigger replacement.
- A list of queries.
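A minimal prestoJob sketch; the query URI, client tags, session property, and output format are hypothetical.
const prestoJob = {
    queryFileUri: "gs://my-bucket/presto/report.sql",
    outputFormat: "CSV",
    clientTags: ["workflow", "daily-report"],
    properties: { "query_max_execution_time": "30m" }, // equivalent to --session in the Presto CLI
    continueOnFailure: true,
};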
WorkflowTemplateJobPrestoJobLoggingConfig, WorkflowTemplateJobPrestoJobLoggingConfigArgs
- Driver
Log Levels Changes to this property will trigger replacement.
- The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
- Driver
Log Levels Changes to this property will trigger replacement.
- The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
- driver
Log Levels Changes to this property will trigger replacement.
- The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
- driver
Log Levels Changes to this property will trigger replacement.
- The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
- driver_
log_ levels Changes to this property will trigger replacement.
- The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
- driver
Log Levels Changes to this property will trigger replacement.
- The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
WorkflowTemplateJobPrestoJobQueryList, WorkflowTemplateJobPrestoJobQueryListArgs
- Queries
This property is required. Changes to this property will trigger replacement.
- Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": } }
- Queries
This property is required. Changes to this property will trigger replacement.
- Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": } }
- queries
This property is required. Changes to this property will trigger replacement.
- Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": } }
- queries
This property is required. Changes to this property will trigger replacement.
- Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": } }
- queries
This property is required. Changes to this property will trigger replacement.
- Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": } }
- queries
This property is required. Changes to this property will trigger replacement.
- Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": } }
WorkflowTemplateJobPysparkJob, WorkflowTemplateJobPysparkJobArgs
- Main
Python File Uri This property is required. Changes to this property will trigger replacement.
- Required. The HCFS URI of the main Python file to use as the driver. Must be a .py file.
- Archive
Uris Changes to this property will trigger replacement.
- HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
- Args
Changes to this property will trigger replacement.
- The arguments to pass to the driver. Do not include arguments, such as
--conf
, that can be set as job properties, since a collision may occur that causes an incorrect job submission. - File
Uris Changes to this property will trigger replacement.
- HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.
- Jar
File Uris Changes to this property will trigger replacement.
- HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks.
- Logging
Config Changes to this property will trigger replacement.
Template Job Pyspark Job Logging Config - The runtime log config for job execution.
- Properties
Changes to this property will trigger replacement.
- A mapping of property names to values, used to configure PySpark. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.
- Python
File Uris Changes to this property will trigger replacement.
- HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.
- Main
Python File Uri This property is required. Changes to this property will trigger replacement.
- Required. The HCFS URI of the main Python file to use as the driver. Must be a .py file.
- Archive
Uris Changes to this property will trigger replacement.
- HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
- Args
Changes to this property will trigger replacement.
- The arguments to pass to the driver. Do not include arguments, such as
--conf
, that can be set as job properties, since a collision may occur that causes an incorrect job submission. - File
Uris Changes to this property will trigger replacement.
- HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.
- Jar
File Uris Changes to this property will trigger replacement.
- HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks.
- Logging
Config Changes to this property will trigger replacement.
Template Job Pyspark Job Logging Config - The runtime log config for job execution.
- Properties
Changes to this property will trigger replacement.
- A mapping of property names to values, used to configure PySpark. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.
- Python
File Uris Changes to this property will trigger replacement.
- HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.
- main
Python File Uri This property is required. Changes to this property will trigger replacement.
- Required. The HCFS URI of the main Python file to use as the driver. Must be a .py file.
- archive
Uris Changes to this property will trigger replacement.
- HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
- args
Changes to this property will trigger replacement.
- The arguments to pass to the driver. Do not include arguments, such as
--conf
, that can be set as job properties, since a collision may occur that causes an incorrect job submission. - file
Uris Changes to this property will trigger replacement.
- HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.
- jar
File Uris Changes to this property will trigger replacement.
- HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks.
- logging
Config Changes to this property will trigger replacement.
Template Job Pyspark Job Logging Config - The runtime log config for job execution.
- properties
Changes to this property will trigger replacement.
- A mapping of property names to values, used to configure PySpark. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.
- python
File Uris Changes to this property will trigger replacement.
- HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.
- main
Python File Uri This property is required. Changes to this property will trigger replacement.
- Required. The HCFS URI of the main Python file to use as the driver. Must be a .py file.
- archive
Uris Changes to this property will trigger replacement.
- HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
- args
Changes to this property will trigger replacement.
- The arguments to pass to the driver. Do not include arguments, such as
--conf
, that can be set as job properties, since a collision may occur that causes an incorrect job submission. - file
Uris Changes to this property will trigger replacement.
- HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.
- jar
File Uris Changes to this property will trigger replacement.
- HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks.
- logging
Config Changes to this property will trigger replacement.
Template Job Pyspark Job Logging Config - The runtime log config for job execution.
- properties
Changes to this property will trigger replacement.
- A mapping of property names to values, used to configure PySpark. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.
- python
File Uris Changes to this property will trigger replacement.
- HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.
- main_
python_ file_ uri This property is required. Changes to this property will trigger replacement.
- Required. The HCFS URI of the main Python file to use as the driver. Must be a .py file.
- archive_
uris Changes to this property will trigger replacement.
- HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
- args
Changes to this property will trigger replacement.
- The arguments to pass to the driver. Do not include arguments, such as
--conf
, that can be set as job properties, since a collision may occur that causes an incorrect job submission. - file_
uris Changes to this property will trigger replacement.
- HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.
- jar_
file_ uris Changes to this property will trigger replacement.
- HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks.
- logging_
config Changes to this property will trigger replacement.
Template Job Pyspark Job Logging Config - The runtime log config for job execution.
- properties
Changes to this property will trigger replacement.
- A mapping of property names to values, used to configure PySpark. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.
- python_
file_ uris Changes to this property will trigger replacement.
- HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.
- main
Python File Uri This property is required. Changes to this property will trigger replacement.
- Required. The HCFS URI of the main Python file to use as the driver. Must be a .py file.
- archive
Uris Changes to this property will trigger replacement.
- HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
- args
Changes to this property will trigger replacement.
- The arguments to pass to the driver. Do not include arguments, such as
--conf
, that can be set as job properties, since a collision may occur that causes an incorrect job submission. - file
Uris Changes to this property will trigger replacement.
- HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.
- jar
File Uris Changes to this property will trigger replacement.
- HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks.
- logging
Config Changes to this property will trigger replacement.
- The runtime log config for job execution.
- properties
Changes to this property will trigger replacement.
- A mapping of property names to values, used to configure PySpark. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.
- python
File Uris Changes to this property will trigger replacement.
- HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.
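A minimal pysparkJob sketch; the file URIs and arguments are hypothetical, and per the note above, flags such as --conf belong in properties rather than args.
const pysparkJob = {
    mainPythonFileUri: "gs://my-bucket/pyspark/main.py",
    pythonFileUris: ["gs://my-bucket/pyspark/helpers.py"],
    args: ["--date", "2024-01-01"],
    properties: { "spark.executor.memory": "4g" },
    loggingConfig: { driverLogLevels: { root: "INFO" } },
};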
WorkflowTemplateJobPysparkJobLoggingConfig, WorkflowTemplateJobPysparkJobLoggingConfigArgs
- driverLogLevels (Python: driver_log_levels). Changes to this property will trigger replacement. The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
WorkflowTemplateJobScheduling, WorkflowTemplateJobSchedulingArgs
- maxFailuresPerHour (Python: max_failures_per_hour). Changes to this property will trigger replacement. Maximum number of times per hour a driver may be restarted as a result of driver exiting with non-zero code before job is reported failed. A job may be reported as thrashing if driver exits with non-zero code 4 times within 10 minute window. Maximum value is 10.
- maxFailuresTotal (Python: max_failures_total). Changes to this property will trigger replacement. Maximum number of times in total a driver may be restarted as a result of driver exiting with non-zero code before job is reported failed. Maximum value is 240.
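A short TypeScript sketch of how these two scheduling properties attach to a job step follows. The step id and jar path are hypothetical placeholders; the only assertion taken from this page is the shape of the scheduling block.

import * as gcp from "@pulumi/gcp";

// Sketch only: a restart budget for a flaky driver. maxFailuresPerHour and
// maxFailuresTotal are the two scheduling properties listed above
// (their documented maximums are 10 and 240).
const scheduledTemplate = new gcp.dataproc.WorkflowTemplate("scheduled-template", {
    name: "scheduled-template-example",
    location: "us-central1",
    placement: {
        clusterSelector: { clusterLabels: { env: "dev" } },
    },
    jobs: [{
        stepId: "retriedJob",
        sparkJob: {
            mainJarFileUri: "gs://my-bucket/jars/job.jar", // hypothetical path
        },
        scheduling: {
            maxFailuresPerHour: 5,
            maxFailuresTotal: 20,
        },
    }],
});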
WorkflowTemplateJobSparkJob, WorkflowTemplateJobSparkJobArgs
- archiveUris (Python: archive_uris). Changes to this property will trigger replacement. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
- args. Changes to this property will trigger replacement. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
- fileUris (Python: file_uris). Changes to this property will trigger replacement. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.
- jarFileUris (Python: jar_file_uris). Changes to this property will trigger replacement. HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.
- loggingConfig (Python: logging_config; type WorkflowTemplateJobSparkJobLoggingConfig). Changes to this property will trigger replacement. The runtime log config for job execution.
- mainClass (Python: main_class). Changes to this property will trigger replacement. The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jar_file_uris.
- mainJarFileUri (Python: main_jar_file_uri). Changes to this property will trigger replacement. The HCFS URI of the jar file that contains the main class.
- properties. Changes to this property will trigger replacement. A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.
WorkflowTemplateJobSparkJobLoggingConfig, WorkflowTemplateJobSparkJobLoggingConfigArgs
- driverLogLevels (Python: driver_log_levels). Changes to this property will trigger replacement. The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
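The sketch below shows how driverLogLevels is expressed in TypeScript for a Spark job; the same map shape applies to the PySpark, SparkR, and Spark SQL logging config types documented on this page. The jar path and package names are hypothetical placeholders.

import * as gcp from "@pulumi/gcp";

// Sketch only: turning up driver logging for one Spark step.
// driverLogLevels is a map of package name to log level, per the
// description above; the jar path and packages are placeholders.
const verboseTemplate = new gcp.dataproc.WorkflowTemplate("verbose-template", {
    name: "verbose-template-example",
    location: "us-central1",
    placement: {
        clusterSelector: { clusterLabels: { env: "dev" } },
    },
    jobs: [{
        stepId: "debugRun",
        sparkJob: {
            mainJarFileUri: "gs://my-bucket/jars/job.jar", // hypothetical path
            loggingConfig: {
                driverLogLevels: {
                    root: "INFO",
                    "org.apache": "DEBUG",
                    "com.example": "FATAL", // hypothetical package
                },
            },
        },
    }],
});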
WorkflowTemplateJobSparkRJob, WorkflowTemplateJobSparkRJobArgs
- mainRFileUri (Python: main_r_file_uri). This property is required. Changes to this property will trigger replacement. Required. The HCFS URI of the main R file to use as the driver. Must be a .R file.
- archiveUris (Python: archive_uris). Changes to this property will trigger replacement. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
- args. Changes to this property will trigger replacement. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
- fileUris (Python: file_uris). Changes to this property will trigger replacement. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.
- loggingConfig (Python: logging_config; type WorkflowTemplateJobSparkRJobLoggingConfig). Changes to this property will trigger replacement. The runtime log config for job execution.
- properties. Changes to this property will trigger replacement. A mapping of property names to values, used to configure SparkR. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.
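A minimal TypeScript sketch of a SparkR step using the properties above; mainRFileUri is the one required field. The gs:// paths are hypothetical placeholders.

import * as gcp from "@pulumi/gcp";

// Sketch only: a single SparkR step on a label-selected cluster.
const sparkRTemplate = new gcp.dataproc.WorkflowTemplate("sparkr-template", {
    name: "sparkr-template-example",
    location: "us-central1",
    placement: {
        clusterSelector: { clusterLabels: { env: "dev" } },
    },
    jobs: [{
        stepId: "rScore",
        sparkRJob: {
            mainRFileUri: "gs://my-bucket/jobs/score.R",          // hypothetical path
            args: ["--input", "gs://my-bucket/data/input.csv"],   // hypothetical path
        },
    }],
});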
WorkflowTemplateJobSparkRJobLoggingConfig, WorkflowTemplateJobSparkRJobLoggingConfigArgs
- driverLogLevels (Python: driver_log_levels). Changes to this property will trigger replacement. The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
WorkflowTemplateJobSparkSqlJob, WorkflowTemplateJobSparkSqlJobArgs
- jarFileUris (Python: jar_file_uris). Changes to this property will trigger replacement. HCFS URIs of jar files to be added to the Spark CLASSPATH.
- loggingConfig (Python: logging_config; type WorkflowTemplateJobSparkSqlJobLoggingConfig). Changes to this property will trigger replacement. The runtime log config for job execution.
- properties. Changes to this property will trigger replacement. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten.
- queryFileUri (Python: query_file_uri). Changes to this property will trigger replacement. The HCFS URI of the script that contains SQL queries.
- queryList (Python: query_list; type WorkflowTemplateJobSparkSqlJobQueryList). Changes to this property will trigger replacement. A list of queries.
- scriptVariables (Python: script_variables). Changes to this property will trigger replacement. Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";).
WorkflowTemplateJobSparkSqlJobLoggingConfig, WorkflowTemplateJobSparkSqlJobLoggingConfigArgs
- driverLogLevels (Python: driver_log_levels). Changes to this property will trigger replacement. The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
WorkflowTemplateJobSparkSqlJobQueryList, WorkflowTemplateJobSparkSqlJobQueryListArgs
- queries. This property is required. Changes to this property will trigger replacement. Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": } }
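The TypeScript sketch below combines the sparkSqlJob properties documented above: inline queries through queryList plus a substitution value through scriptVariables. The table name, variable, and property values are illustrative placeholders, and the ${run_date} reference inside the query assumes the usual Spark SQL variable-substitution syntax.

import * as gcp from "@pulumi/gcp";

// Sketch only: an inline Spark SQL step. scriptVariables is equivalent to
// issuing SET run_date="2024-01-01"; before the queries run.
const sqlTemplate = new gcp.dataproc.WorkflowTemplate("sparksql-template", {
    name: "sparksql-template-example",
    location: "us-central1",
    placement: {
        clusterSelector: { clusterLabels: { env: "dev" } },
    },
    jobs: [{
        stepId: "dailyRollup",
        sparkSqlJob: {
            queryList: {
                // No trailing semicolons needed; each entry is one query.
                queries: [
                    "CREATE TABLE IF NOT EXISTS rollup AS SELECT * FROM events WHERE dt = '${run_date}'",
                ],
            },
            scriptVariables: { run_date: "2024-01-01" },
            properties: { "spark.sql.shuffle.partitions": "8" },
        },
    }],
});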
WorkflowTemplateParameter, WorkflowTemplateParameterArgs
- fields. This property is required. Changes to this property will trigger replacement. Required. Paths to all fields that the parameter replaces. A field is allowed to appear in at most one parameter's list of field paths. A field path is similar in syntax to a .sparkJob.args
- name. This property is required. Changes to this property will trigger replacement. Required. Parameter name. The parameter name is used as the key, and paired with the parameter value, which are passed to the template when the template is instantiated. The name must contain only capital letters (A-Z), numbers (0-9), and underscores (_), and must not start with a number. The maximum length is 40 characters.
- description. Changes to this property will trigger replacement. Brief description of the parameter. Must not exceed 1024 characters.
- validation (type WorkflowTemplateParameterValidation). Changes to this property will trigger replacement. Validation rules to be applied to this parameter's value.
WorkflowTemplateParameterValidation, WorkflowTemplateParameterValidationArgs
- regex (type WorkflowTemplateParameterValidationRegex). Changes to this property will trigger replacement. Validation based on regular expressions.
- values (type WorkflowTemplateParameterValidationValues). Changes to this property will trigger replacement. Validation based on a list of allowed values.
WorkflowTemplateParameterValidationRegex, WorkflowTemplateParameterValidationRegexArgs
- regexes. This property is required. Changes to this property will trigger replacement. Required. RE2 regular expressions used to validate the parameter's value. The value must match the regex in its entirety (substring matches are not sufficient).
WorkflowTemplateParameterValidationValues, WorkflowTemplateParameterValidationValuesArgs
- values. This property is required. Changes to this property will trigger replacement. Required. List of allowed values for the parameter.
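A TypeScript sketch of a parameterized template tying together the parameter, validation, regex, and values types described above. The field paths, parameter names, and gs:// paths are illustrative assumptions, not values from this page; check the field-path rules in the fields description before relying on them.

import * as gcp from "@pulumi/gcp";

// Sketch only: two template parameters, one validated by regex and one by an
// allowed-value list. Values are substituted when the template is instantiated.
const paramTemplate = new gcp.dataproc.WorkflowTemplate("param-template", {
    name: "param-template-example",
    location: "us-central1",
    placement: {
        clusterSelector: {
            clusterLabels: { env: "dev" },
            zone: "us-central1-a", // replaced at instantiation via the ZONE parameter
        },
    },
    jobs: [{
        stepId: "queryStep",
        sparkSqlJob: { queryFileUri: "gs://my-bucket/queries/report.sql" }, // hypothetical path
    }],
    parameters: [
        {
            name: "ZONE",
            fields: ["placement.clusterSelector.zone"], // assumed field-path syntax
            description: "Zone the selected cluster must run in.",
            validation: { regex: { regexes: ["us-central1-[a-f]"] } },
        },
        {
            name: "QUERY_FILE",
            fields: ["jobs['queryStep'].sparkSqlJob.queryFileUri"], // assumed field-path syntax
            validation: { values: { values: [
                "gs://my-bucket/queries/report.sql",
                "gs://my-bucket/queries/backfill.sql",
            ] } },
        },
    ],
});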
WorkflowTemplatePlacement, WorkflowTemplatePlacementArgs
- clusterSelector (Python: cluster_selector; type WorkflowTemplatePlacementClusterSelector). Changes to this property will trigger replacement. A selector that chooses target cluster for jobs based on metadata. The selector is evaluated at the time each job is submitted.
- managedCluster (Python: managed_cluster; type WorkflowTemplatePlacementManagedCluster). Changes to this property will trigger replacement. A cluster that is managed by the workflow.
WorkflowTemplatePlacementClusterSelector, WorkflowTemplatePlacementClusterSelectorArgs
- clusterLabels (Python: cluster_labels). This property is required. Changes to this property will trigger replacement. Required. The cluster labels. Cluster must have all labels to match.
- zone. Changes to this property will trigger replacement. The zone where workflow process executes. This parameter does not affect the selection of the cluster. If unspecified, the zone of the first cluster matching the selector is used.
WorkflowTemplatePlacementManagedCluster, WorkflowTemplatePlacementManagedClusterArgs
- clusterName (Python: cluster_name). This property is required. Changes to this property will trigger replacement. Required. The cluster name prefix. A unique cluster name will be formed by appending a random suffix. The name must contain only lower-case letters (a-z), numbers (0-9), and hyphens (-). Must begin with a letter. Cannot begin or end with hyphen. Must consist of between 2 and 35 characters.
- config (type WorkflowTemplatePlacementManagedClusterConfig). This property is required. Changes to this property will trigger replacement. Required. The cluster configuration.
- labels. Changes to this property will trigger replacement. The labels to associate with this cluster. Label keys must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: {0,63} No more than 32 labels can be associated with a given cluster.
WorkflowTemplatePlacementManagedClusterConfig, WorkflowTemplatePlacementManagedClusterConfigArgs
- Autoscaling
Config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Autoscaling Config - Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset.
- Encryption
Config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Encryption Config - Encryption settings for the cluster.
- Endpoint
Config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Endpoint Config - Port/endpoint configuration for this cluster
- Gce
Cluster Config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Gce Cluster Config - The shared Compute Engine config settings for all instances in a cluster.
- Gke
Cluster Config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Gke Cluster Config - The Kubernetes Engine config for Dataproc clusters deployed to Kubernetes. Setting this is considered mutually exclusive with Compute Engine-based options such as
gce_cluster_config
,master_config
,worker_config
,secondary_worker_config
, andautoscaling_config
. - Initialization
Actions Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Initialization Action> - Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's
role
metadata to run an executable on a master or worker node, as shown below usingcurl
(you can also usewget
): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if ; then ... master specific actions ... else ... worker specific actions ... fi - Lifecycle
Config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Lifecycle Config - Lifecycle setting for the cluster.
- Master
Config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Master Config - The Compute Engine config settings for additional worker instances in a cluster.
- Metastore
Config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Metastore Config - Metastore configuration.
- Secondary
Worker Config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Secondary Worker Config - The Compute Engine config settings for additional worker instances in a cluster.
- Security
Config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Security Config - Security settings for the cluster.
- Software
Config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Software Config - The config settings for software inside the cluster.
- Staging
Bucket Changes to this property will trigger replacement.
- A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets).
- Temp
Bucket Changes to this property will trigger replacement.
- A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket.
- Worker
Config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Worker Config - The Compute Engine config settings for worker instances in a cluster.
- Autoscaling
Config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Autoscaling Config - Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset.
- Encryption
Config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Encryption Config - Encryption settings for the cluster.
- Endpoint
Config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Endpoint Config - Port/endpoint configuration for this cluster
- Gce
Cluster Config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Gce Cluster Config - The shared Compute Engine config settings for all instances in a cluster.
- Gke
Cluster Config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Gke Cluster Config - The Kubernetes Engine config for Dataproc clusters deployed to Kubernetes. Setting this is considered mutually exclusive with Compute Engine-based options such as
gce_cluster_config
,master_config
,worker_config
,secondary_worker_config
, andautoscaling_config
. - Initialization
Actions Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Initialization Action - Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's role metadata to run an executable on a master or worker node, as shown below using curl (you can also use wget): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role); if [[ "${ROLE}" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... fi - Lifecycle
Config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Lifecycle Config - Lifecycle setting for the cluster.
- Master
Config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Master Config - The Compute Engine config settings for the cluster's master instance.
- Metastore
Config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Metastore Config - Metastore configuration.
- Secondary
Worker Config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Secondary Worker Config - The Compute Engine config settings for additional worker instances in a cluster.
- Security
Config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Security Config - Security settings for the cluster.
- Software
Config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Software Config - The config settings for software inside the cluster.
- Staging
Bucket Changes to this property will trigger replacement.
- A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets).
- Temp
Bucket Changes to this property will trigger replacement.
- A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket.
- Worker
Config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Worker Config - The Compute Engine config settings for worker instances in a cluster.
- autoscaling
Config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Autoscaling Config - Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset.
- encryption
Config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Encryption Config - Encryption settings for the cluster.
- endpoint
Config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Endpoint Config - Port/endpoint configuration for this cluster
- gce
Cluster Config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Gce Cluster Config - The shared Compute Engine config settings for all instances in a cluster.
- gke
Cluster Config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Gke Cluster Config - The Kubernetes Engine config for Dataproc clusters deployed to Kubernetes. Setting this is considered mutually exclusive with Compute Engine-based options such as
gce_cluster_config
,master_config
,worker_config
,secondary_worker_config
, andautoscaling_config
. - initialization
Actions Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Initialization Action - Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's role metadata to run an executable on a master or worker node, as shown below using curl (you can also use wget): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role); if [[ "${ROLE}" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... fi - lifecycle
Config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Lifecycle Config - Lifecycle setting for the cluster.
- master
Config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Master Config - The Compute Engine config settings for the cluster's master instance.
- metastore
Config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Metastore Config - Metastore configuration.
- secondary
Worker Config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Secondary Worker Config - The Compute Engine config settings for additional worker instances in a cluster.
- security
Config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Security Config - Security settings for the cluster.
- software
Config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Software Config - The config settings for software inside the cluster.
- staging
Bucket Changes to this property will trigger replacement.
- A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets).
- temp
Bucket Changes to this property will trigger replacement.
- A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket.
- worker
Config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Worker Config - The Compute Engine config settings for worker instances in a cluster.
- autoscaling
Config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Autoscaling Config - Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset.
- encryption
Config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Encryption Config - Encryption settings for the cluster.
- endpoint
Config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Endpoint Config - Port/endpoint configuration for this cluster
- gce
Cluster Config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Gce Cluster Config - The shared Compute Engine config settings for all instances in a cluster.
- gke
Cluster Config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Gke Cluster Config - The Kubernetes Engine config for Dataproc clusters deployed to Kubernetes. Setting this is considered mutually exclusive with Compute Engine-based options such as
gce_cluster_config
,master_config
,worker_config
,secondary_worker_config
, andautoscaling_config
. - initialization
Actions Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Initialization Action[] - Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's role metadata to run an executable on a master or worker node, as shown below using curl (you can also use wget): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role); if [[ "${ROLE}" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... fi - lifecycle
Config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Lifecycle Config - Lifecycle setting for the cluster.
- master
Config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Master Config - The Compute Engine config settings for the cluster's master instance.
- metastore
Config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Metastore Config - Metastore configuration.
- secondary
Worker Config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Secondary Worker Config - The Compute Engine config settings for additional worker instances in a cluster.
- security
Config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Security Config - Security settings for the cluster.
- software
Config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Software Config - The config settings for software inside the cluster.
- staging
Bucket Changes to this property will trigger replacement.
- A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets).
- temp
Bucket Changes to this property will trigger replacement.
- A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket.
- worker
Config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Worker Config - The Compute Engine config settings for worker instances in a cluster.
- autoscaling_
config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Autoscaling Config - Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset.
- encryption_
config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Encryption Config - Encryption settings for the cluster.
- endpoint_
config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Endpoint Config - Port/endpoint configuration for this cluster
- gce_
cluster_ config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Gce Cluster Config - The shared Compute Engine config settings for all instances in a cluster.
- gke_
cluster_ config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Gke Cluster Config - The Kubernetes Engine config for Dataproc clusters deployed to Kubernetes. Setting this is considered mutually exclusive with Compute Engine-based options such as
gce_cluster_config
,master_config
,worker_config
,secondary_worker_config
, andautoscaling_config
. - initialization_
actions Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Initialization Action - Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's role metadata to run an executable on a master or worker node, as shown below using curl (you can also use wget): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role); if [[ "${ROLE}" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... fi - lifecycle_
config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Lifecycle Config - Lifecycle setting for the cluster.
- master_
config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Master Config - The Compute Engine config settings for the cluster's master instance.
- metastore_
config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Metastore Config - Metastore configuration.
- secondary_
worker_ config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Secondary Worker Config - The Compute Engine config settings for additional worker instances in a cluster.
- security_
config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Security Config - Security settings for the cluster.
- software_
config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Software Config - The config settings for software inside the cluster.
- staging_
bucket Changes to this property will trigger replacement.
- A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets).
- temp_
bucket Changes to this property will trigger replacement.
- A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket.
- worker_
config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Worker Config - The Compute Engine config settings for worker instances in a cluster.
- autoscaling
Config Changes to this property will trigger replacement.
- Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset.
- encryption
Config Changes to this property will trigger replacement.
- Encryption settings for the cluster.
- endpoint
Config Changes to this property will trigger replacement.
- Port/endpoint configuration for this cluster
- gce
Cluster Config Changes to this property will trigger replacement.
- The shared Compute Engine config settings for all instances in a cluster.
- gke
Cluster Config Changes to this property will trigger replacement.
- The Kubernetes Engine config for Dataproc clusters deployed to Kubernetes. Setting this is considered mutually exclusive with Compute Engine-based options such as
gce_cluster_config
,master_config
,worker_config
,secondary_worker_config
, andautoscaling_config
. - initialization
Actions Changes to this property will trigger replacement.
- Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's role metadata to run an executable on a master or worker node, as shown below using curl (you can also use wget): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role); if [[ "${ROLE}" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... fi - lifecycle
Config Changes to this property will trigger replacement.
- Lifecycle setting for the cluster.
- master
Config Changes to this property will trigger replacement.
- The Compute Engine config settings for the cluster's master instance.
- metastore
Config Changes to this property will trigger replacement.
- Metastore configuration.
- secondary
Worker Config Changes to this property will trigger replacement.
- The Compute Engine config settings for additional worker instances in a cluster.
- security
Config Changes to this property will trigger replacement.
- Security settings for the cluster.
- software
Config Changes to this property will trigger replacement.
- The config settings for software inside the cluster.
- staging
Bucket Changes to this property will trigger replacement.
- A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets).
- temp
Bucket Changes to this property will trigger replacement.
- A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket.
- worker
Config Changes to this property will trigger replacement.
- The Compute Engine config settings for worker instances in a cluster.
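Several of these cluster-level fields are easiest to see in context. Below is a minimal TypeScript sketch (the bucket names, init-script URI, and image version are placeholders, not values from this page) that sets stagingBucket, tempBucket, softwareConfig, and initializationActions on the managed cluster:
import * as gcp from "@pulumi/gcp";

const withBuckets = new gcp.dataproc.WorkflowTemplate("with-buckets", {
    name: "with-buckets",
    location: "us-central1",
    placement: {
        managedCluster: {
            clusterName: "buckets-cluster",
            config: {
                // Hypothetical bucket names; if you specify them, the buckets must already exist.
                stagingBucket: "my-dataproc-staging-bucket",
                tempBucket: "my-dataproc-temp-bucket",
                softwareConfig: { imageVersion: "2.0.35-debian10" },
                initializationActions: [{
                    // Hypothetical init script; executionTimeout uses the Duration JSON form (e.g. "600s").
                    executableFile: "gs://my-dataproc-staging-bucket/scripts/setup.sh",
                    executionTimeout: "600s",
                }],
            },
        },
    },
    jobs: [{ stepId: "someJob", sparkJob: { mainClass: "SomeClass" } }],
});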
WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig, WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigArgs
- Policy
Changes to this property will trigger replacement.
- The autoscaling policy used by the cluster. Only resource names including projectid and location (region) are valid. Examples: *
https://www.googleapis.com/compute/v1/projects/
Note that the policy must be in the same project and Dataproc region.
- Policy
Changes to this property will trigger replacement.
- The autoscaling policy used by the cluster. Only resource names including projectid and location (region) are valid. Examples: *
https://www.googleapis.com/compute/v1/projects/
Note that the policy must be in the same project and Dataproc region.
- policy
Changes to this property will trigger replacement.
- The autoscaling policy used by the cluster. Only resource names including projectid and location (region) are valid. Examples: *
https://www.googleapis.com/compute/v1/projects/
Note that the policy must be in the same project and Dataproc region.
- policy
Changes to this property will trigger replacement.
- The autoscaling policy used by the cluster. Only resource names including projectid and location (region) are valid. Examples: *
https://www.googleapis.com/compute/v1/projects/
Note that the policy must be in the same project and Dataproc region.
- policy
Changes to this property will trigger replacement.
- The autoscaling policy used by the cluster. Only resource names including projectid and location (region) are valid. Examples: *
https://www.googleapis.com/compute/v1/projects/
Note that the policy must be in the same project and Dataproc region.
- policy
Changes to this property will trigger replacement.
- The autoscaling policy used by the cluster. Only resource names including projectid and location (region) are valid. Examples: *
https://www.googleapis.com/compute/v1/projects/
Note that the policy must be in the same project and Dataproc region.
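As a concrete example, attaching an existing autoscaling policy to the managed cluster could look like the sketch below; the policy resource name is a placeholder and, per the note above, must be in the same project and Dataproc region as the template.
import * as gcp from "@pulumi/gcp";

const autoscaled = new gcp.dataproc.WorkflowTemplate("autoscaled", {
    name: "autoscaled",
    location: "us-central1",
    placement: {
        managedCluster: {
            clusterName: "autoscaled-cluster",
            config: {
                autoscalingConfig: {
                    // Placeholder policy name; the policy must already exist in us-central1 of my-project.
                    policy: "projects/my-project/locations/us-central1/autoscalingPolicies/my-policy",
                },
            },
        },
    },
    jobs: [{ stepId: "someJob", sparkJob: { mainClass: "SomeClass" } }],
});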
WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig, WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigArgs
- Gce
Pd Kms Key Name Changes to this property will trigger replacement.
- The Cloud KMS key name to use for PD disk encryption for all instances in the cluster.
- Gce
Pd Kms Key Name Changes to this property will trigger replacement.
- The Cloud KMS key name to use for PD disk encryption for all instances in the cluster.
- gce
Pd Kms Key Name Changes to this property will trigger replacement.
- The Cloud KMS key name to use for PD disk encryption for all instances in the cluster.
- gce
Pd Kms Key Name Changes to this property will trigger replacement.
- The Cloud KMS key name to use for PD disk encryption for all instances in the cluster.
- gce_
pd_ kms_ key_ name Changes to this property will trigger replacement.
- The Cloud KMS key name to use for PD disk encryption for all instances in the cluster.
- gce
Pd Kms Key Name Changes to this property will trigger replacement.
- The Cloud KMS key name to use for PD disk encryption for all instances in the cluster.
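A short sketch of customer-managed disk encryption; the KMS key name is a placeholder, and the Dataproc service agent is assumed to have permission to use it.
import * as gcp from "@pulumi/gcp";

const encrypted = new gcp.dataproc.WorkflowTemplate("encrypted", {
    name: "encrypted",
    location: "us-central1",
    placement: {
        managedCluster: {
            clusterName: "encrypted-cluster",
            config: {
                encryptionConfig: {
                    // Placeholder Cloud KMS key used for PD disk encryption on all cluster instances.
                    gcePdKmsKeyName: "projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key",
                },
            },
        },
    },
    jobs: [{ stepId: "someJob", sparkJob: { mainClass: "SomeClass" } }],
});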
WorkflowTemplatePlacementManagedClusterConfigEndpointConfig, WorkflowTemplatePlacementManagedClusterConfigEndpointConfigArgs
- Enable
Http Port Access Changes to this property will trigger replacement.
- If true, enable http access to specific ports on the cluster from external sources. Defaults to false.
- Http
Ports Dictionary<string, string> - Output only. The map of port descriptions to URLs. Will only be populated if enable_http_port_access is true.
- Enable
Http Port Access Changes to this property will trigger replacement.
- If true, enable http access to specific ports on the cluster from external sources. Defaults to false.
- Http
Ports map[string]string - Output only. The map of port descriptions to URLs. Will only be populated if enable_http_port_access is true.
- enable
Http Port Access Changes to this property will trigger replacement.
- If true, enable http access to specific ports on the cluster from external sources. Defaults to false.
- http
Ports Map<String,String> - Output only. The map of port descriptions to URLs. Will only be populated if enable_http_port_access is true.
- enable
Http Port Access Changes to this property will trigger replacement.
- If true, enable http access to specific ports on the cluster from external sources. Defaults to false.
- http
Ports {[key: string]: string} - Output only. The map of port descriptions to URLs. Will only be populated if enable_http_port_access is true.
- enable_
http_ port_ access Changes to this property will trigger replacement.
- If true, enable http access to specific ports on the cluster from external sources. Defaults to false.
- http_
ports Mapping[str, str] - Output only. The map of port descriptions to URLs. Will only be populated if enable_http_port_access is true.
- enable
Http Port Access Changes to this property will trigger replacement.
- If true, enable http access to specific ports on the cluster from external sources. Defaults to false.
- http
Ports Map<String> - Output only. The map of port descriptions to URLs. Will only be populated if enable_http_port_access is true.
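Enabling HTTP port access is a single flag; httpPorts is output only and is populated once the cluster exists. A minimal sketch:
import * as gcp from "@pulumi/gcp";

const withEndpoints = new gcp.dataproc.WorkflowTemplate("with-endpoints", {
    name: "with-endpoints",
    location: "us-central1",
    placement: {
        managedCluster: {
            clusterName: "endpoints-cluster",
            config: {
                endpointConfig: {
                    // Expose HTTP ports on the cluster to external sources; defaults to false.
                    enableHttpPortAccess: true,
                },
            },
        },
    },
    jobs: [{ stepId: "someJob", sparkJob: { mainClass: "SomeClass" } }],
});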
WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig, WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigArgs
- Internal
Ip Only Changes to this property will trigger replacement.
- If true, all instances in the cluster will only have internal IP addresses. By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. This
internal_ip_only
restriction can only be enabled for subnetwork enabled networks, and all off-cluster dependencies must be configured to be accessible without external IP addresses. - Metadata
Changes to this property will trigger replacement.
- The Compute Engine metadata entries to add to all instances (see About VM metadata).
- Network
Changes to this property will trigger replacement.
- The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither
network_uri
norsubnetwork_uri
is specified, the "default" network of the project is used, if it exists. Cannot be a "Custom Subnet Network" (see Using Subnetworks for more information). A full URL, partial URI, or short name are valid. Examples: * `/regions/global/default` * `default` - Node
Group Affinity Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Gce Cluster Config Node Group Affinity - Node Group Affinity for sole-tenant clusters.
- Private
Ipv6Google Access Changes to this property will trigger replacement.
- The type of IPv6 access for a cluster. Possible values: PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED, INHERIT_FROM_SUBNETWORK, OUTBOUND, BIDIRECTIONAL
- Reservation
Affinity Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Gce Cluster Config Reservation Affinity - Reservation Affinity for consuming Zonal reservation.
- Service
Account Changes to this property will trigger replacement.
- The service account used by Dataproc cluster VM instances to access Google Cloud Platform services. If not specified, the Compute Engine default service account (https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) is used.
- Service
Account Scopes Changes to this property will trigger replacement.
- The URIs of service account scopes to be included in Compute Engine instances. The following base set of scopes is always included: * https://www.googleapis.com/auth/cloud.useraccounts.readonly * https://www.googleapis.com/auth/devstorage.read_write * https://www.googleapis.com/auth/logging.write If no scopes are specified, the following defaults are also provided: * https://www.googleapis.com/auth/bigquery * https://www.googleapis.com/auth/bigtable.admin.table * https://www.googleapis.com/auth/bigtable.data * https://www.googleapis.com/auth/devstorage.full_control
- Shielded
Instance Config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Gce Cluster Config Shielded Instance Config - Shielded Instance Config for clusters using Compute Engine Shielded VMs. Structure defined below.
- Subnetwork
Changes to this property will trigger replacement.
- The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri. A full URL, partial URI, or short name are valid. Examples: *
https://www.googleapis.com/compute/v1/projects//regions/us-east1/subnetworks/sub0
* `sub0` - Tags
Changes to this property will trigger replacement.
- The Compute Engine tags to add to all instances (see Manage tags for resources).
- Zone
Changes to this property will trigger replacement.
- The zone where the Compute Engine cluster will be located. On a create request, it is required in the "global" region. If omitted in a non-global Dataproc region, the service will pick a zone in the corresponding Compute Engine region. On a get request, zone will always be present. A full URL, partial URI, or short name are valid. Examples: *
https://www.googleapis.com/compute/v1/projects/
*us-central1-f
- Internal
Ip Only Changes to this property will trigger replacement.
- If true, all instances in the cluster will only have internal IP addresses. By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. This
internal_ip_only
restriction can only be enabled for subnetwork enabled networks, and all off-cluster dependencies must be configured to be accessible without external IP addresses. - Metadata
Changes to this property will trigger replacement.
- The Compute Engine metadata entries to add to all instances (see About VM metadata).
- Network
Changes to this property will trigger replacement.
- The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither
network_uri
norsubnetwork_uri
is specified, the "default" network of the project is used, if it exists. Cannot be a "Custom Subnet Network" (see Using Subnetworks for more information). A full URL, partial URI, or short name are valid. Examples: * `/regions/global/default` * `default` - Node
Group Affinity Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Gce Cluster Config Node Group Affinity - Node Group Affinity for sole-tenant clusters.
- Private
Ipv6Google Access Changes to this property will trigger replacement.
- The type of IPv6 access for a cluster. Possible values: PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED, INHERIT_FROM_SUBNETWORK, OUTBOUND, BIDIRECTIONAL
- Reservation
Affinity Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Gce Cluster Config Reservation Affinity - Reservation Affinity for consuming Zonal reservation.
- Service
Account Changes to this property will trigger replacement.
- The service account used by Dataproc cluster VM instances to access Google Cloud Platform services. If not specified, the Compute Engine default service account (https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) is used.
- Service
Account Scopes Changes to this property will trigger replacement.
- The URIs of service account scopes to be included in Compute Engine instances. The following base set of scopes is always included: * https://www.googleapis.com/auth/cloud.useraccounts.readonly * https://www.googleapis.com/auth/devstorage.read_write * https://www.googleapis.com/auth/logging.write If no scopes are specified, the following defaults are also provided: * https://www.googleapis.com/auth/bigquery * https://www.googleapis.com/auth/bigtable.admin.table * https://www.googleapis.com/auth/bigtable.data * https://www.googleapis.com/auth/devstorage.full_control
- Shielded
Instance Config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Gce Cluster Config Shielded Instance Config - Shielded Instance Config for clusters using Compute Engine Shielded VMs. Structure defined below.
- Subnetwork
Changes to this property will trigger replacement.
- The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri. A full URL, partial URI, or short name are valid. Examples: *
https://www.googleapis.com/compute/v1/projects//regions/us-east1/subnetworks/sub0
* `sub0` - Tags
Changes to this property will trigger replacement.
- The Compute Engine tags to add to all instances (see Manage tags for resources).
- Zone
Changes to this property will trigger replacement.
- The zone where the Compute Engine cluster will be located. On a create request, it is required in the "global" region. If omitted in a non-global Dataproc region, the service will pick a zone in the corresponding Compute Engine region. On a get request, zone will always be present. A full URL, partial URI, or short name are valid. Examples: *
https://www.googleapis.com/compute/v1/projects/
*us-central1-f
- internal
Ip Only Changes to this property will trigger replacement.
- If true, all instances in the cluster will only have internal IP addresses. By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. This
internal_ip_only
restriction can only be enabled for subnetwork enabled networks, and all off-cluster dependencies must be configured to be accessible without external IP addresses. - metadata
Changes to this property will trigger replacement.
- The Compute Engine metadata entries to add to all instances (see About VM metadata).
- network
Changes to this property will trigger replacement.
- The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither
network_uri
norsubnetwork_uri
is specified, the "default" network of the project is used, if it exists. Cannot be a "Custom Subnet Network" (see Using Subnetworks for more information). A full URL, partial URI, or short name are valid. Examples: * `/regions/global/default` * `default` - node
Group Affinity Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Gce Cluster Config Node Group Affinity - Node Group Affinity for sole-tenant clusters.
- private
Ipv6Google Access Changes to this property will trigger replacement.
- The type of IPv6 access for a cluster. Possible values: PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED, INHERIT_FROM_SUBNETWORK, OUTBOUND, BIDIRECTIONAL
- reservation
Affinity Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Gce Cluster Config Reservation Affinity - Reservation Affinity for consuming Zonal reservation.
- service
Account Changes to this property will trigger replacement.
- The service account used by Dataproc cluster VM instances to access Google Cloud Platform services. If not specified, the Compute Engine default service account (https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) is used.
- service
Account Scopes Changes to this property will trigger replacement.
- The URIs of service account scopes to be included in Compute Engine instances. The following base set of scopes is always included: * https://www.googleapis.com/auth/cloud.useraccounts.readonly * https://www.googleapis.com/auth/devstorage.read_write * https://www.googleapis.com/auth/logging.write If no scopes are specified, the following defaults are also provided: * https://www.googleapis.com/auth/bigquery * https://www.googleapis.com/auth/bigtable.admin.table * https://www.googleapis.com/auth/bigtable.data * https://www.googleapis.com/auth/devstorage.full_control
- shielded
Instance Config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Gce Cluster Config Shielded Instance Config - Shielded Instance Config for clusters using Compute Engine Shielded VMs. Structure defined below.
- subnetwork
Changes to this property will trigger replacement.
- The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri. A full URL, partial URI, or short name are valid. Examples: *
https://www.googleapis.com/compute/v1/projects//regions/us-east1/subnetworks/sub0
* `sub0` - tags
Changes to this property will trigger replacement.
- The Compute Engine tags to add to all instances (see Manage tags for resources).
- zone
Changes to this property will trigger replacement.
- The zone where the Compute Engine cluster will be located. On a create request, it is required in the "global" region. If omitted in a non-global Dataproc region, the service will pick a zone in the corresponding Compute Engine region. On a get request, zone will always be present. A full URL, partial URI, or short name are valid. Examples: *
https://www.googleapis.com/compute/v1/projects/
*us-central1-f
- internal
Ip Only Changes to this property will trigger replacement.
- If true, all instances in the cluster will only have internal IP addresses. By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. This
internal_ip_only
restriction can only be enabled for subnetwork enabled networks, and all off-cluster dependencies must be configured to be accessible without external IP addresses. - metadata
Changes to this property will trigger replacement.
- The Compute Engine metadata entries to add to all instances (see About VM metadata).
- network
Changes to this property will trigger replacement.
- The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither
network_uri
norsubnetwork_uri
is specified, the "default" network of the project is used, if it exists. Cannot be a "Custom Subnet Network" (see Using Subnetworks for more information). A full URL, partial URI, or short name are valid. Examples: * `/regions/global/default` * `default` - node
Group Affinity Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Gce Cluster Config Node Group Affinity - Node Group Affinity for sole-tenant clusters.
- private
Ipv6Google Access Changes to this property will trigger replacement.
- The type of IPv6 access for a cluster. Possible values: PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED, INHERIT_FROM_SUBNETWORK, OUTBOUND, BIDIRECTIONAL
- reservation
Affinity Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Gce Cluster Config Reservation Affinity - Reservation Affinity for consuming Zonal reservation.
- service
Account Changes to this property will trigger replacement.
- The service account used by Dataproc cluster VM instances to access Google Cloud Platform services. If not specified, the Compute Engine default service account (https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) is used.
- service
Account Scopes Changes to this property will trigger replacement.
- The URIs of service account scopes to be included in Compute Engine instances. The following base set of scopes is always included: * https://www.googleapis.com/auth/cloud.useraccounts.readonly * https://www.googleapis.com/auth/devstorage.read_write * https://www.googleapis.com/auth/logging.write If no scopes are specified, the following defaults are also provided: * https://www.googleapis.com/auth/bigquery * https://www.googleapis.com/auth/bigtable.admin.table * https://www.googleapis.com/auth/bigtable.data * https://www.googleapis.com/auth/devstorage.full_control
- shielded
Instance Config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Gce Cluster Config Shielded Instance Config - Shielded Instance Config for clusters using Compute Engine Shielded VMs. Structure defined below.
- subnetwork
Changes to this property will trigger replacement.
- The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri. A full URL, partial URI, or short name are valid. Examples: *
https://www.googleapis.com/compute/v1/projects//regions/us-east1/subnetworks/sub0
* `sub0` - tags
Changes to this property will trigger replacement.
- The Compute Engine tags to add to all instances (see Manage tags for resources).
- zone
Changes to this property will trigger replacement.
- The zone where the Compute Engine cluster will be located. On a create request, it is required in the "global" region. If omitted in a non-global Dataproc region, the service will pick a zone in the corresponding Compute Engine region. On a get request, zone will always be present. A full URL, partial URI, or short name are valid. Examples: *
https://www.googleapis.com/compute/v1/projects/
*us-central1-f
- internal_
ip_ only Changes to this property will trigger replacement.
- If true, all instances in the cluster will only have internal IP addresses. By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. This
internal_ip_only
restriction can only be enabled for subnetwork enabled networks, and all off-cluster dependencies must be configured to be accessible without external IP addresses. - metadata
Changes to this property will trigger replacement.
- The Compute Engine metadata entries to add to all instances (see About VM metadata).
- network
Changes to this property will trigger replacement.
- The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither
network_uri
norsubnetwork_uri
is specified, the "default" network of the project is used, if it exists. Cannot be a "Custom Subnet Network" (see Using Subnetworks for more information). A full URL, partial URI, or short name are valid. Examples: * `/regions/global/default` * `default` - node_
group_ affinity Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Gce Cluster Config Node Group Affinity - Node Group Affinity for sole-tenant clusters.
- private_
ipv6_ google_ access Changes to this property will trigger replacement.
- The type of IPv6 access for a cluster. Possible values: PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED, INHERIT_FROM_SUBNETWORK, OUTBOUND, BIDIRECTIONAL
- reservation_
affinity Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Gce Cluster Config Reservation Affinity - Reservation Affinity for consuming Zonal reservation.
- service_
account Changes to this property will trigger replacement.
- The service account used by Dataproc cluster VM instances to access Google Cloud Platform services. If not specified, the Compute Engine default service account (https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) is used.
- service_
account_ scopes Changes to this property will trigger replacement.
- The URIs of service account scopes to be included in Compute Engine instances. The following base set of scopes is always included: * https://www.googleapis.com/auth/cloud.useraccounts.readonly * https://www.googleapis.com/auth/devstorage.read_write * https://www.googleapis.com/auth/logging.write If no scopes are specified, the following defaults are also provided: * https://www.googleapis.com/auth/bigquery * https://www.googleapis.com/auth/bigtable.admin.table * https://www.googleapis.com/auth/bigtable.data * https://www.googleapis.com/auth/devstorage.full_control
- shielded_
instance_ config Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Gce Cluster Config Shielded Instance Config - Shielded Instance Config for clusters using Compute Engine Shielded VMs. Structure defined below.
- subnetwork
Changes to this property will trigger replacement.
- The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri. A full URL, partial URI, or short name are valid. Examples: *
https://www.googleapis.com/compute/v1/projects//regions/us-east1/subnetworks/sub0
* `sub0` - tags
Changes to this property will trigger replacement.
- The Compute Engine tags to add to all instances (see Manage tags for resources).
- zone
Changes to this property will trigger replacement.
- The zone where the Compute Engine cluster will be located. On a create request, it is required in the "global" region. If omitted in a non-global Dataproc region, the service will pick a zone in the corresponding Compute Engine region. On a get request, zone will always be present. A full URL, partial URI, or short name are valid. Examples: *
https://www.googleapis.com/compute/v1/projects/
*us-central1-f
- internal
Ip Only Changes to this property will trigger replacement.
- If true, all instances in the cluster will only have internal IP addresses. By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. This
internal_ip_only
restriction can only be enabled for subnetwork enabled networks, and all off-cluster dependencies must be configured to be accessible without external IP addresses. - metadata
Changes to this property will trigger replacement.
- The Compute Engine metadata entries to add to all instances (see About VM metadata).
- network
Changes to this property will trigger replacement.
- The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither
network_uri
norsubnetwork_uri
is specified, the "default" network of the project is used, if it exists. Cannot be a "Custom Subnet Network" (see Using Subnetworks for more information). A full URL, partial URI, or short name are valid. Examples: * `/regions/global/default` * `default` - node
Group Affinity Changes to this property will trigger replacement.
- Node Group Affinity for sole-tenant clusters.
- private
Ipv6Google Access Changes to this property will trigger replacement.
- The type of IPv6 access for a cluster. Possible values: PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED, INHERIT_FROM_SUBNETWORK, OUTBOUND, BIDIRECTIONAL
- reservation
Affinity Changes to this property will trigger replacement.
- Reservation Affinity for consuming Zonal reservation.
- service
Account Changes to this property will trigger replacement.
- The service account used by Dataproc cluster VM instances to access Google Cloud Platform services. If not specified, the Compute Engine default service account (https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) is used.
- service
Account Scopes Changes to this property will trigger replacement.
- The URIs of service account scopes to be included in Compute Engine instances. The following base set of scopes is always included: * https://www.googleapis.com/auth/cloud.useraccounts.readonly * https://www.googleapis.com/auth/devstorage.read_write * https://www.googleapis.com/auth/logging.write If no scopes are specified, the following defaults are also provided: * https://www.googleapis.com/auth/bigquery * https://www.googleapis.com/auth/bigtable.admin.table * https://www.googleapis.com/auth/bigtable.data * https://www.googleapis.com/auth/devstorage.full_control
- shielded
Instance Config Changes to this property will trigger replacement.
- Shielded Instance Config for clusters using Compute Engine Shielded VMs. Structure defined below.
- subnetwork
Changes to this property will trigger replacement.
- The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri. A full URL, partial URI, or short name are valid. Examples: *
https://www.googleapis.com/compute/v1/projects//regions/us-east1/subnetworks/sub0
* `sub0` - tags
Changes to this property will trigger replacement.
- The Compute Engine tags to add to all instances (see Manage tags for resources).
- zone
Changes to this property will trigger replacement.
- The zone where the Compute Engine cluster will be located. On a create request, it is required in the "global" region. If omitted in a non-global Dataproc region, the service will pick a zone in the corresponding Compute Engine region. On a get request, zone will always be present. A full URL, partial URI, or short name are valid. Examples: *
https://www.googleapis.com/compute/v1/projects/
*us-central1-f
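A sketch tying several of the gceClusterConfig fields above together; the subnetwork, service account, and tag values are placeholders, and internalIpOnly assumes the subnetwork and any off-cluster dependencies are reachable without external IPs.
import * as gcp from "@pulumi/gcp";

const privateCluster = new gcp.dataproc.WorkflowTemplate("private-cluster", {
    name: "private-cluster",
    location: "us-central1",
    placement: {
        managedCluster: {
            clusterName: "private-cluster",
            config: {
                gceClusterConfig: {
                    zone: "us-central1-a",
                    // Placeholder subnetwork short name; cannot be combined with a network URI.
                    subnetwork: "sub0",
                    // Instances get internal IPs only; off-cluster dependencies must be reachable privately.
                    internalIpOnly: true,
                    // Placeholder service account; defaults to the Compute Engine default service account.
                    serviceAccount: "dataproc-sa@my-project.iam.gserviceaccount.com",
                    tags: ["dataproc", "workflow"],
                    metadata: { purpose: "workflow-template-example" },
                },
            },
        },
    },
    jobs: [{ stepId: "someJob", sparkJob: { mainClass: "SomeClass" } }],
});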
WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity, WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityArgs
- Node
Group This property is required. Changes to this property will trigger replacement.
- Required. The URI of a sole-tenant node group resource that the cluster will be created on. A full URL, partial URI, or node group name are valid. Examples: * `/zones/us-central1-a/nodeGroups/node-group-1` * `node-group-1`
- Node
Group This property is required. Changes to this property will trigger replacement.
- Required. The URI of a sole-tenant node group resource that the cluster will be created on. A full URL, partial URI, or node group name are valid. Examples: * `/zones/us-central1-a/nodeGroups/node-group-1` * `node-group-1`
- node
Group This property is required. Changes to this property will trigger replacement.
- Required. The URI of a sole-tenant node group resource that the cluster will be created on. A full URL, partial URI, or node group name are valid. Examples: * `/zones/us-central1-a/nodeGroups/node-group-1` * `node-group-1`
- node
Group This property is required. Changes to this property will trigger replacement.
- Required. The URI of a sole-tenant node group resource that the cluster will be created on. A full URL, partial URI, or node group name are valid. Examples: * `/zones/us-central1-a/nodeGroups/node-group-1` * `node-group-1`
- node_
group This property is required. Changes to this property will trigger replacement.
- Required. The URI of a sole-tenant node group resource that the cluster will be created on. A full URL, partial URI, or node group name are valid. Examples: * `/zones/us-central1-a/nodeGroups/node-group-1` * `node-group-1`
- node
Group This property is required. Changes to this property will trigger replacement.
- Required. The URI of a sole-tenant node group resource that the cluster will be created on. A full URL, partial URI, or node group name are valid. Examples: * `/zones/us-central1-a/nodeGroups/node-group-1` * `node-group-1`
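For sole-tenant placement, the node group can be referenced by short name, partial URI, or full URL, as described above. A sketch with a placeholder node group:
import * as gcp from "@pulumi/gcp";

const soleTenant = new gcp.dataproc.WorkflowTemplate("sole-tenant", {
    name: "sole-tenant",
    location: "us-central1",
    placement: {
        managedCluster: {
            clusterName: "sole-tenant-cluster",
            config: {
                gceClusterConfig: {
                    zone: "us-central1-a",
                    nodeGroupAffinity: {
                        // Placeholder sole-tenant node group in the same zone as the cluster.
                        nodeGroup: "node-group-1",
                    },
                },
            },
        },
    },
    jobs: [{ stepId: "someJob", sparkJob: { mainClass: "SomeClass" } }],
});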
WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity, WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityArgs
- Consume
Reservation Type Changes to this property will trigger replacement.
- Type of reservation to consume Possible values: TYPE_UNSPECIFIED, NO_RESERVATION, ANY_RESERVATION, SPECIFIC_RESERVATION
- Key
Changes to this property will trigger replacement.
- Corresponds to the label key of reservation resource.
- Values
Changes to this property will trigger replacement.
- Corresponds to the label values of reservation resource.
- Consume
Reservation Type Changes to this property will trigger replacement.
- Type of reservation to consume Possible values: TYPE_UNSPECIFIED, NO_RESERVATION, ANY_RESERVATION, SPECIFIC_RESERVATION
- Key
Changes to this property will trigger replacement.
- Corresponds to the label key of reservation resource.
- Values
Changes to this property will trigger replacement.
- Corresponds to the label values of reservation resource.
- consume
Reservation Type Changes to this property will trigger replacement.
- Type of reservation to consume Possible values: TYPE_UNSPECIFIED, NO_RESERVATION, ANY_RESERVATION, SPECIFIC_RESERVATION
- key
Changes to this property will trigger replacement.
- Corresponds to the label key of reservation resource.
- values
Changes to this property will trigger replacement.
- Corresponds to the label values of reservation resource.
- consume
Reservation Type Changes to this property will trigger replacement.
- Type of reservation to consume Possible values: TYPE_UNSPECIFIED, NO_RESERVATION, ANY_RESERVATION, SPECIFIC_RESERVATION
- key
Changes to this property will trigger replacement.
- Corresponds to the label key of reservation resource.
- values
Changes to this property will trigger replacement.
- Corresponds to the label values of reservation resource.
- consume_
reservation_ type Changes to this property will trigger replacement.
- Type of reservation to consume Possible values: TYPE_UNSPECIFIED, NO_RESERVATION, ANY_RESERVATION, SPECIFIC_RESERVATION
- key
Changes to this property will trigger replacement.
- Corresponds to the label key of reservation resource.
- values
Changes to this property will trigger replacement.
- Corresponds to the label values of reservation resource.
- consume
Reservation Type Changes to this property will trigger replacement.
- Type of reservation to consume Possible values: TYPE_UNSPECIFIED, NO_RESERVATION, ANY_RESERVATION, SPECIFIC_RESERVATION
- key
Changes to this property will trigger replacement.
- Corresponds to the label key of reservation resource.
- values
Changes to this property will trigger replacement.
- Corresponds to the label values of reservation resource.
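A sketch consuming a specific Compute Engine reservation; the reservation label key shown ("compute.googleapis.com/reservation-name") and the reservation name are assumptions, not values from this page.
import * as gcp from "@pulumi/gcp";

const reserved = new gcp.dataproc.WorkflowTemplate("reserved", {
    name: "reserved",
    location: "us-central1",
    placement: {
        managedCluster: {
            clusterName: "reserved-cluster",
            config: {
                gceClusterConfig: {
                    zone: "us-central1-a",
                    reservationAffinity: {
                        consumeReservationType: "SPECIFIC_RESERVATION",
                        // Assumed label key/values identifying the target reservation.
                        key: "compute.googleapis.com/reservation-name",
                        values: ["my-reservation"],
                    },
                },
            },
        },
    },
    jobs: [{ stepId: "someJob", sparkJob: { mainClass: "SomeClass" } }],
});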
WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig, WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfigArgs
- Enable
Integrity Monitoring Changes to this property will trigger replacement.
- Defines whether instances have Integrity Monitoring enabled.
- Enable
Secure Boot Changes to this property will trigger replacement.
- Defines whether instances have Secure Boot enabled.
- Enable
Vtpm Changes to this property will trigger replacement.
- Defines whether instances have the vTPM enabled.
- Enable
Integrity Monitoring Changes to this property will trigger replacement.
- Defines whether instances have Integrity Monitoring enabled.
- Enable
Secure Boot Changes to this property will trigger replacement.
- Defines whether instances have Secure Boot enabled.
- Enable
Vtpm Changes to this property will trigger replacement.
- Defines whether instances have the vTPM enabled.
- enable
Integrity Monitoring Changes to this property will trigger replacement.
- Defines whether instances have Integrity Monitoring enabled.
- enable
Secure Boot Changes to this property will trigger replacement.
- Defines whether instances have Secure Boot enabled.
- enable
Vtpm Changes to this property will trigger replacement.
- Defines whether instances have the vTPM enabled.
- enable
Integrity Monitoring Changes to this property will trigger replacement.
- Defines whether instances have Integrity Monitoring enabled.
- enable
Secure Boot Changes to this property will trigger replacement.
- Defines whether instances have Secure Boot enabled.
- enable
Vtpm Changes to this property will trigger replacement.
- Defines whether instances have the vTPM enabled.
- enable_
integrity_ monitoring Changes to this property will trigger replacement.
- Defines whether instances have Integrity Monitoring enabled.
- enable_
secure_ boot Changes to this property will trigger replacement.
- Defines whether instances have Secure Boot enabled.
- enable_
vtpm Changes to this property will trigger replacement.
- Defines whether instances have the vTPM enabled.
- enable
Integrity Monitoring Changes to this property will trigger replacement.
- Defines whether instances have Integrity Monitoring enabled.
- enable
Secure Boot Changes to this property will trigger replacement.
- Defines whether instances have Secure Boot enabled.
- enable
Vtpm Changes to this property will trigger replacement.
- Defines whether instances have the vTPM enabled.
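Shielded VM options are plain booleans under gceClusterConfig; a minimal sketch (the cluster image is assumed to support Shielded VM features):
import * as gcp from "@pulumi/gcp";

const shielded = new gcp.dataproc.WorkflowTemplate("shielded", {
    name: "shielded",
    location: "us-central1",
    placement: {
        managedCluster: {
            clusterName: "shielded-cluster",
            config: {
                gceClusterConfig: {
                    zone: "us-central1-a",
                    shieldedInstanceConfig: {
                        enableSecureBoot: true,
                        enableVtpm: true,
                        enableIntegrityMonitoring: true,
                    },
                },
            },
        },
    },
    jobs: [{ stepId: "someJob", sparkJob: { mainClass: "SomeClass" } }],
});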
WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig, WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigArgs
- Namespaced
Gke Deployment Target Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Gke Cluster Config Namespaced Gke Deployment Target - A target for the deployment.
- Namespaced
Gke Deployment Target Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Gke Cluster Config Namespaced Gke Deployment Target - A target for the deployment.
- namespaced
Gke Deployment Target Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Gke Cluster Config Namespaced Gke Deployment Target - A target for the deployment.
- namespaced
Gke Deployment Target Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Gke Cluster Config Namespaced Gke Deployment Target - A target for the deployment.
- namespaced_
gke_ deployment_ target Changes to this property will trigger replacement.
Template Placement Managed Cluster Config Gke Cluster Config Namespaced Gke Deployment Target - A target for the deployment.
- namespaced
Gke Deployment Target Changes to this property will trigger replacement.
- A target for the deployment.
WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget, WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetArgs
- clusterNamespace: A namespace within the GKE cluster to deploy into. Changes to this property will trigger replacement.
- targetGkeCluster: The target GKE cluster to deploy to. Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'. Changes to this property will trigger replacement.
(In the Python SDK these properties are named cluster_namespace and target_gke_cluster.)
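A minimal sketch, not from the original example, of pointing the managed cluster at a namespace of an existing GKE cluster; the project, location, cluster, and namespace values are placeholders, and additional configuration may be required for a GKE-backed cluster to be valid.
import * as gcp from "@pulumi/gcp";

// Hypothetical sketch: deploy the managed cluster into an existing GKE cluster namespace.
const gkeTemplate = new gcp.dataproc.WorkflowTemplate("gke-template", {
    name: "gke-template-example",
    location: "us-central1",
    placement: {
        managedCluster: {
            clusterName: "gke-backed-cluster",
            config: {
                gkeClusterConfig: {
                    namespacedGkeDeploymentTarget: {
                        // Placeholder resource name in the documented format.
                        targetGkeCluster: "projects/my-project/locations/us-central1/clusters/my-gke-cluster",
                        clusterNamespace: "dataproc-workloads",
                    },
                },
            },
        },
    },
    jobs: [{ stepId: "someJob", sparkJob: { mainClass: "SomeClass" } }],
});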
WorkflowTemplatePlacementManagedClusterConfigInitializationAction, WorkflowTemplatePlacementManagedClusterConfigInitializationActionArgs
- executableFile: Required. Cloud Storage URI of executable file. Changes to this property will trigger replacement.
- executionTimeout: Amount of time executable has to complete. Default is 10 minutes (see the proto3 JSON Mapping section of the Protocol Buffers Language Guide). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. Changes to this property will trigger replacement.
(In the Python SDK these properties are named executable_file and execution_timeout.)
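A minimal sketch, not from the original example, of attaching an initialization action to the managed cluster; the bucket path is a placeholder and the "600s" timeout assumes the usual proto3 duration string format.
import * as gcp from "@pulumi/gcp";

// Hypothetical sketch: run a startup script on cluster nodes with an explicit timeout.
const initTemplate = new gcp.dataproc.WorkflowTemplate("init-template", {
    name: "init-template-example",
    location: "us-central1",
    placement: {
        managedCluster: {
            clusterName: "init-cluster",
            config: {
                gceClusterConfig: { zone: "us-central1-a" },
                initializationActions: [{
                    // Placeholder Cloud Storage URI of the executable to run.
                    executableFile: "gs://my-bucket/scripts/install-deps.sh",
                    // Assumed duration string format; 10 minutes here instead of the default.
                    executionTimeout: "600s",
                }],
            },
        },
    },
    jobs: [{ stepId: "someJob", sparkJob: { mainClass: "SomeClass" } }],
});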
WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig, WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigArgs
- autoDeleteTime: The time when cluster will be auto-deleted (see the proto3 JSON Mapping section of the Protocol Buffers Language Guide). Changes to this property will trigger replacement.
- autoDeleteTtl: The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see the proto3 JSON Mapping section of the Protocol Buffers Language Guide). Changes to this property will trigger replacement.
- idleDeleteTtl: The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see the proto3 JSON Mapping section of the Protocol Buffers Language Guide). Changes to this property will trigger replacement.
- idleStartTime (string): Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see the proto3 JSON Mapping section of the Protocol Buffers Language Guide).
(In the Python SDK these properties are named auto_delete_time, auto_delete_ttl, idle_delete_ttl, and idle_start_time.)
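A minimal sketch, not from the original example, of a lifecycle configuration that keeps the managed cluster short-lived; the TTL values are placeholders and assume the usual proto3 duration string format.
import * as gcp from "@pulumi/gcp";

// Hypothetical sketch: delete the cluster after 2 hours of life or 30 minutes of idleness.
const lifecycleTemplate = new gcp.dataproc.WorkflowTemplate("lifecycle-template", {
    name: "lifecycle-template-example",
    location: "us-central1",
    placement: {
        managedCluster: {
            clusterName: "ephemeral-cluster",
            config: {
                gceClusterConfig: { zone: "us-central1-a" },
                lifecycleConfig: {
                    autoDeleteTtl: "7200s",  // must be at least 10 minutes
                    idleDeleteTtl: "1800s",  // must be at least 5 minutes
                },
            },
        },
    },
    jobs: [{ stepId: "someJob", sparkJob: { mainClass: "SomeClass" } }],
});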
WorkflowTemplatePlacementManagedClusterConfigMasterConfig, WorkflowTemplatePlacementManagedClusterConfigMasterConfigArgs
- accelerators (list of WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerator): The Compute Engine accelerator configuration for these instances. Changes to this property will trigger replacement.
- diskConfig (WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig): Disk option config settings. Changes to this property will trigger replacement.
- image: The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * https://www.googleapis.com/compute/beta/projects/ If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default. Changes to this property will trigger replacement.
- instanceNames (list of string): Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group.
- isPreemptible (bool): Output only. Specifies that this instance group contains preemptible instances.
- machineType: The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * https://www.googleapis.com/compute/v1/projects/ Auto Zone Exception: if you are using the Dataproc Auto Zone Placement feature (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement), you must use the short name of the machine type resource, for example, n1-standard-2. Changes to this property will trigger replacement.
- managedGroupConfigs (list of WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig): Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.
- minCpuPlatform: Specifies the minimum cpu platform for the Instance Group. See Minimum CPU platform. Changes to this property will trigger replacement.
- numInstances: The number of VM instances in the instance group. For master instance groups, must be set to 1. Changes to this property will trigger replacement.
- preemptibility: Specifies the preemptibility of the instance group. The default value for master and worker groups is NON_PREEMPTIBLE. This default cannot be changed. The default value for secondary instances is PREEMPTIBLE. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE. Changes to this property will trigger replacement.
(In the Python SDK these properties use snake_case names, e.g. disk_config, instance_names, is_preemptible, machine_type, managed_group_configs, min_cpu_platform, and num_instances.)
WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerator, WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorArgs
- acceleratorCount: The number of the accelerator cards of this type exposed to this instance. Changes to this property will trigger replacement.
- acceleratorType: Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. If you are using the Dataproc Auto Zone Placement feature (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement), you must use the short name of the accelerator type resource, for example, nvidia-tesla-k80. Changes to this property will trigger replacement.
(In the Python SDK these properties are named accelerator_count and accelerator_type.)
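A minimal sketch, not from the original example, of a master config that attaches a GPU accelerator using the short type name so Auto Zone Placement remains usable; the machine type, accelerator type, and disk sizes are placeholders.
import * as gcp from "@pulumi/gcp";

// Hypothetical sketch: a single master node with one GPU accelerator attached.
const gpuTemplate = new gcp.dataproc.WorkflowTemplate("gpu-template", {
    name: "gpu-template-example",
    location: "us-central1",
    placement: {
        managedCluster: {
            clusterName: "gpu-cluster",
            config: {
                gceClusterConfig: { zone: "us-central1-a" },
                masterConfig: {
                    numInstances: 1,           // master groups must use exactly 1 (or 3 for HA)
                    machineType: "n1-standard-4",
                    accelerators: [{
                        acceleratorType: "nvidia-tesla-k80",  // short name, as the docs require with auto zone placement
                        acceleratorCount: 1,
                    }],
                    diskConfig: { bootDiskType: "pd-ssd", bootDiskSizeGb: 100 },
                },
            },
        },
    },
    jobs: [{ stepId: "someJob", sparkJob: { mainClass: "SomeClass" } }],
});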
WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig, WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigArgs
- bootDiskSizeGb: Size in GB of the boot disk (default is 500GB). Changes to this property will trigger replacement.
- bootDiskType: Type of the boot disk (default is "pd-standard"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or "pd-standard" (Persistent Disk Hard Disk Drive). Changes to this property will trigger replacement.
- numLocalSsds: Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries. Changes to this property will trigger replacement.
(In the Python SDK these properties are named boot_disk_size_gb, boot_disk_type, and num_local_ssds.)
WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig, WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigArgs
- instanceGroupManagerName (string): Output only. The name of the Instance Group Manager for this group.
- instanceTemplateName (string): Output only. The name of the Instance Template used for the Managed Instance Group.
(In the Python SDK these properties are named instance_group_manager_name and instance_template_name.)
WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig, WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigArgs
- dataprocMetastoreService: Required. Changes to this property will trigger replacement. Resource name of an existing Dataproc Metastore service. Example: projects/
(In the Python SDK this property is named dataproc_metastore_service.)
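A minimal sketch, not from the original example, of attaching an existing Dataproc Metastore service to the managed cluster; the project and service names are placeholders.
import * as gcp from "@pulumi/gcp";

// Hypothetical sketch: use an existing Dataproc Metastore service as the Hive metastore.
const metastoreTemplate = new gcp.dataproc.WorkflowTemplate("metastore-template", {
    name: "metastore-template-example",
    location: "us-central1",
    placement: {
        managedCluster: {
            clusterName: "metastore-cluster",
            config: {
                gceClusterConfig: { zone: "us-central1-a" },
                metastoreConfig: {
                    // Placeholder resource name of an existing Dataproc Metastore service.
                    dataprocMetastoreService: "projects/my-project/locations/us-central1/services/my-metastore",
                },
            },
        },
    },
    jobs: [{ stepId: "someJob", sparkJob: { mainClass: "SomeClass" } }],
});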
WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig, WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigArgs
- accelerators (list of WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerator): Optional. The Compute Engine accelerator configuration for these instances. Changes to this property will trigger replacement.
- diskConfig (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig): Optional. Disk option config settings. Changes to this property will trigger replacement.
- image: Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id] * projects/[project_id]/global/images/[image-id] * image-id Image family examples (Dataproc will use the most recent image from the family): * https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name] * projects/[project_id]/global/images/family/[custom-image-family-name] If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default. Changes to this property will trigger replacement.
- instanceNames (list of string): Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group.
- isPreemptible (bool): Output only. Specifies that this instance group contains preemptible instances.
- machineType: Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2 * projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2 * n1-standard-2 Auto Zone Exception: If you are using the Dataproc Auto Zone Placement feature, you must use the short name of the machine type resource, for example, n1-standard-2. Changes to this property will trigger replacement.
- managedGroupConfigs (list of WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig): Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.
- minCpuPlatform: Optional. Specifies the minimum cpu platform for the Instance Group. See Dataproc > Minimum CPU Platform. Changes to this property will trigger replacement.
- numInstances: Optional. The number of VM instances in the instance group. For HA cluster master_config groups, must be set to 3. For standard cluster master_config groups, must be set to 1. Changes to this property will trigger replacement.
- preemptibility: Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is NON_PREEMPTIBLE. This default cannot be changed. The default value for secondary instances is PREEMPTIBLE. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE. Changes to this property will trigger replacement.
(In the Python SDK these properties use snake_case names, e.g. disk_config, instance_names, is_preemptible, machine_type, managed_group_configs, min_cpu_platform, and num_instances.)
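A minimal sketch, not from the original example, of adding a preemptible secondary worker group alongside the primary workers; the instance counts, machine types, and disk size are placeholders, and preemptibility is spelled out only for clarity since PREEMPTIBLE is already the documented default for secondary instances.
import * as gcp from "@pulumi/gcp";

// Hypothetical sketch: burst capacity via preemptible secondary workers.
const secondaryTemplate = new gcp.dataproc.WorkflowTemplate("secondary-template", {
    name: "secondary-template-example",
    location: "us-central1",
    placement: {
        managedCluster: {
            clusterName: "burst-cluster",
            config: {
                gceClusterConfig: { zone: "us-central1-a" },
                masterConfig: { numInstances: 1, machineType: "n1-standard-2" },
                workerConfig: { numInstances: 2, machineType: "n1-standard-2" },
                secondaryWorkerConfig: {
                    numInstances: 4,
                    preemptibility: "PREEMPTIBLE",      // documented default for secondary instances
                    diskConfig: { bootDiskSizeGb: 50 },
                },
            },
        },
    },
    jobs: [{ stepId: "someJob", sparkJob: { mainClass: "SomeClass" } }],
});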
WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerator, WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorArgs
- acceleratorCount: The number of the accelerator cards of this type exposed to this instance. Changes to this property will trigger replacement.
- acceleratorType: Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. If you are using the Dataproc Auto Zone Placement feature (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement), you must use the short name of the accelerator type resource, for example, nvidia-tesla-k80. Changes to this property will trigger replacement.
(In the Python SDK these properties are named accelerator_count and accelerator_type.)
WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig, WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigArgs
- bootDiskSizeGb - Size in GB of the boot disk (default is 500GB). Changes to this property will trigger replacement.
- bootDiskType - Type of the boot disk (default is "pd-standard"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or "pd-standard" (Persistent Disk Hard Disk Drive). Changes to this property will trigger replacement.
- numLocalSsds - Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries. Changes to this property will trigger replacement.
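The following is a minimal sketch showing how the secondary worker properties above fit together in a template; the cluster name, zone, instance counts, and accelerator type are illustrative, not prescriptive.
import * as gcp from "@pulumi/gcp";

// Hypothetical template with preemptible secondary workers, an accelerator,
// and a custom boot disk; all names and sizes below are placeholders.
const preemptibleTemplate = new gcp.dataproc.WorkflowTemplate("preemptible-template", {
    name: "preemptible-template",
    location: "us-central1",
    placement: {
        managedCluster: {
            clusterName: "preemptible-cluster",
            config: {
                gceClusterConfig: { zone: "us-central1-a" },
                secondaryWorkerConfig: {
                    numInstances: 4,
                    // Secondary instances default to PREEMPTIBLE; set explicitly here for clarity.
                    preemptibility: "PREEMPTIBLE",
                    accelerators: [{
                        // Short name form, required when relying on Auto Zone Placement.
                        acceleratorType: "nvidia-tesla-k80",
                        acceleratorCount: 1,
                    }],
                    diskConfig: {
                        bootDiskType: "pd-ssd",
                        bootDiskSizeGb: 100,
                        numLocalSsds: 1,
                    },
                },
            },
        },
    },
    jobs: [{ stepId: "someJob", sparkJob: { mainClass: "SomeClass" } }],
});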
WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig, WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigArgs
- instanceGroupManagerName - Output only. The name of the Instance Group Manager for this group.
- instanceTemplateName - Output only. The name of the Instance Template used for the Managed Instance Group.
WorkflowTemplatePlacementManagedClusterConfigSecurityConfig, WorkflowTemplatePlacementManagedClusterConfigSecurityConfigArgs
- kerberosConfig - Kerberos related configuration (see WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig below). Changes to this property will trigger replacement.
WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig, WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigArgs
- crossRealmTrustAdminServer - The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship. Changes to this property will trigger replacement.
- crossRealmTrustKdc - The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship. Changes to this property will trigger replacement.
- crossRealmTrustRealm - The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust. Changes to this property will trigger replacement.
- crossRealmTrustSharedPassword - The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship. Changes to this property will trigger replacement.
- enableKerberos - Flag to indicate whether to Kerberize the cluster (default: false). Set this field to true to enable Kerberos on a cluster. Changes to this property will trigger replacement.
- kdcDbKey - The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database. Changes to this property will trigger replacement.
- keyPassword - The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc. Changes to this property will trigger replacement.
- keystore - The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate. Changes to this property will trigger replacement.
- keystorePassword - The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, this password is generated by Dataproc. Changes to this property will trigger replacement.
- kmsKey - The URI of the KMS key used to encrypt various sensitive files. Changes to this property will trigger replacement.
- realm - The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm. Changes to this property will trigger replacement.
- rootPrincipalPassword - The Cloud Storage URI of a KMS encrypted file containing the root principal password. Changes to this property will trigger replacement.
- tgtLifetimeHours - The lifetime of the ticket granting ticket, in hours. If not specified, or if the user specifies 0, the default value 10 will be used. Changes to this property will trigger replacement.
- truststore - The Cloud Storage URI of the truststore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate. Changes to this property will trigger replacement.
- truststorePassword - The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc. Changes to this property will trigger replacement.
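A minimal sketch of wiring the Kerberos settings above into a template; the KMS key and Cloud Storage URIs are placeholders, and the encrypted root-principal password file is assumed to already exist.
import * as gcp from "@pulumi/gcp";

// Hypothetical Kerberized cluster; replace the key ring, crypto key, and bucket
// paths with real resources before using this sketch.
const kerberizedTemplate = new gcp.dataproc.WorkflowTemplate("kerberized-template", {
    name: "kerberized-template",
    location: "us-central1",
    placement: {
        managedCluster: {
            clusterName: "kerberized-cluster",
            config: {
                gceClusterConfig: { zone: "us-central1-a" },
                securityConfig: {
                    kerberosConfig: {
                        enableKerberos: true,
                        kmsKey: "projects/my-project/locations/global/keyRings/my-ring/cryptoKeys/my-key",
                        rootPrincipalPassword: "gs://my-secure-bucket/root-principal-password.encrypted",
                        tgtLifetimeHours: 10,
                    },
                },
            },
        },
    },
    jobs: [{ stepId: "someJob", sparkJob: { mainClass: "SomeClass" } }],
});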
WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig, WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigArgs
- imageVersion - The version of software inside the cluster. It must be one of the supported Dataproc Versions, such as "1.2" (including a subminor version, such as "1.2.29"), or the "preview" version. If unspecified, it defaults to the latest Debian version. Changes to this property will trigger replacement.
- optionalComponents - The set of components to activate on the cluster. Changes to this property will trigger replacement.
- properties - The properties to set on daemon config files. Property keys are specified in prefix:property format, for example core:hadoop.tmp.dir. Changes to this property will trigger replacement. The following are supported prefixes and their mappings:
  - capacity-scheduler: capacity-scheduler.xml
  - core: core-site.xml
  - distcp: distcp-default.xml
  - hdfs: hdfs-site.xml
  - hive: hive-site.xml
  - mapred: mapred-site.xml
  - pig: pig.properties
  - spark: spark-defaults.conf
  - yarn: yarn-site.xml
  For more information, see Cluster properties.
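A minimal sketch using the software config fields above; the component and property values are illustrative and use the prefix:property key format described in the mapping list.
import * as gcp from "@pulumi/gcp";

// Hypothetical template that pins an image version, activates an optional
// component, and sets daemon properties via prefixed keys.
const tunedTemplate = new gcp.dataproc.WorkflowTemplate("tuned-template", {
    name: "tuned-template",
    location: "us-central1",
    placement: {
        managedCluster: {
            clusterName: "tuned-cluster",
            config: {
                gceClusterConfig: { zone: "us-central1-a" },
                softwareConfig: {
                    imageVersion: "2.0.35-debian10",
                    optionalComponents: ["ZEPPELIN"],
                    properties: {
                        // Written to core-site.xml
                        "core:hadoop.tmp.dir": "/tmp/hadoop",
                        // Written to spark-defaults.conf
                        "spark:spark.executor.memory": "4g",
                    },
                },
            },
        },
    },
    jobs: [{ stepId: "someJob", sparkJob: { mainClass: "SomeClass" } }],
});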
WorkflowTemplatePlacementManagedClusterConfigWorkerConfig, WorkflowTemplatePlacementManagedClusterConfigWorkerConfigArgs
- accelerators - Optional. The Compute Engine accelerator configuration for these instances. Changes to this property will trigger replacement.
- diskConfig - Optional. Disk option config settings. Changes to this property will trigger replacement.
- image - Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id], projects/[project_id]/global/images/[image-id], image-id. Image family examples (Dataproc will use the most recent image from the family): https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name], projects/[project_id]/global/images/family/[custom-image-family-name]. If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default. Changes to this property will trigger replacement.
- instanceNames - Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group.
- isPreemptible - Output only. Specifies that this instance group contains preemptible instances.
- machineType - Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name is valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2, projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2, n1-standard-2. Auto Zone Exception: If you are using the Dataproc Auto Zone Placement feature, you must use the short name of the machine type resource, for example, n1-standard-2. Changes to this property will trigger replacement.
- managedGroupConfigs - Output only. The config for the Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.
- minCpuPlatform - Optional. Specifies the minimum CPU platform for the Instance Group. See Dataproc > Minimum CPU Platform. Changes to this property will trigger replacement.
- numInstances - Optional. The number of VM instances in the instance group. For HA cluster master_config groups, must be set to 3. For standard cluster master_config groups, must be set to 1. Changes to this property will trigger replacement.
- preemptibility - Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is NON_PREEMPTIBLE. This default cannot be changed. The default value for secondary instances is PREEMPTIBLE. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE. Changes to this property will trigger replacement.
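A minimal sketch of a worker group using the properties above; the machine type, CPU platform, and instance count are illustrative.
import * as gcp from "@pulumi/gcp";

// Hypothetical template that pins the worker machine type and a minimum CPU
// platform, and states the (unchangeable) NON_PREEMPTIBLE default explicitly.
const workerTemplate = new gcp.dataproc.WorkflowTemplate("worker-template", {
    name: "worker-template",
    location: "us-central1",
    placement: {
        managedCluster: {
            clusterName: "worker-cluster",
            config: {
                gceClusterConfig: { zone: "us-central1-a" },
                workerConfig: {
                    numInstances: 3,
                    machineType: "n1-standard-2", // short name works with Auto Zone Placement
                    minCpuPlatform: "Intel Skylake",
                    preemptibility: "NON_PREEMPTIBLE",
                },
            },
        },
    },
    jobs: [{ stepId: "someJob", sparkJob: { mainClass: "SomeClass" } }],
});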
WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerator, WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorArgs
- acceleratorCount - The number of the accelerator cards of this type exposed to this instance. Changes to this property will trigger replacement.
- acceleratorType - Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. If you are using the Dataproc Auto Zone Placement feature (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement), you must use the short name of the accelerator type resource, for example, nvidia-tesla-k80. Changes to this property will trigger replacement.
WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig, WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigArgs
- bootDiskSizeGb - Size in GB of the boot disk (default is 500GB). Changes to this property will trigger replacement.
- bootDiskType - Type of the boot disk (default is "pd-standard"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or "pd-standard" (Persistent Disk Hard Disk Drive). Changes to this property will trigger replacement.
- numLocalSsds - Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries. Changes to this property will trigger replacement.
WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig, WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigArgs
- instanceGroupManagerName - Output only. The name of the Instance Group Manager for this group.
- instanceTemplateName - Output only. The name of the Instance Template used for the Managed Instance Group.
Import
WorkflowTemplate can be imported using any of these accepted formats:
projects/{{project}}/locations/{{location}}/workflowTemplates/{{name}}
{{project}}/{{location}}/{{name}}
{{location}}/{{name}}
When using the pulumi import command, WorkflowTemplate can be imported using one of the formats above. For example:
$ pulumi import gcp:dataproc/workflowTemplate:WorkflowTemplate default projects/{{project}}/locations/{{location}}/workflowTemplates/{{name}}
$ pulumi import gcp:dataproc/workflowTemplate:WorkflowTemplate default {{project}}/{{location}}/{{name}}
$ pulumi import gcp:dataproc/workflowTemplate:WorkflowTemplate default {{location}}/{{name}}
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository: Google Cloud (GCP) Classic pulumi/pulumi-gcp
- License: Apache-2.0
- Notes: This Pulumi package is based on the google-beta Terraform Provider.