dataproc.v1beta1.workflowTemplate
"WorkflowTemplate is the Schema for the WorkflowTemplates API. A Workflow Template is a reusable workflow configuration."
Index
fn new(name)
obj metadata
fn withAnnotations(annotations)
fn withAnnotationsMixin(annotations)
fn withClusterName(clusterName)
fn withCreationTimestamp(creationTimestamp)
fn withDeletionGracePeriodSeconds(deletionGracePeriodSeconds)
fn withDeletionTimestamp(deletionTimestamp)
fn withFinalizers(finalizers)
fn withFinalizersMixin(finalizers)
fn withGenerateName(generateName)
fn withGeneration(generation)
fn withLabels(labels)
fn withLabelsMixin(labels)
fn withName(name)
fn withNamespace(namespace)
fn withOwnerReferences(ownerReferences)
fn withOwnerReferencesMixin(ownerReferences)
fn withResourceVersion(resourceVersion)
fn withSelfLink(selfLink)
fn withUid(uid)
obj spec
fn withDeletionPolicy(deletionPolicy)
fn withManagementPolicies(managementPolicies)
fn withManagementPoliciesMixin(managementPolicies)
obj spec.forProvider
fn withDagTimeout(dagTimeout)
fn withJobs(jobs)
fn withJobsMixin(jobs)
fn withLabels(labels)
fn withLabelsMixin(labels)
fn withLocation(location)
fn withParameters(parameters)
fn withParametersMixin(parameters)
fn withPlacement(placement)
fn withPlacementMixin(placement)
fn withProject(project)
fn withVersion(version)
obj spec.forProvider.jobs
fn withHadoopJob(hadoopJob)
fn withHadoopJobMixin(hadoopJob)
fn withHiveJob(hiveJob)
fn withHiveJobMixin(hiveJob)
fn withLabels(labels)
fn withLabelsMixin(labels)
fn withPigJob(pigJob)
fn withPigJobMixin(pigJob)
fn withPrerequisiteStepIds(prerequisiteStepIds)
fn withPrerequisiteStepIdsMixin(prerequisiteStepIds)
fn withPrestoJob(prestoJob)
fn withPrestoJobMixin(prestoJob)
fn withPysparkJob(pysparkJob)
fn withPysparkJobMixin(pysparkJob)
fn withScheduling(scheduling)
fn withSchedulingMixin(scheduling)
fn withSparkJob(sparkJob)
fn withSparkJobMixin(sparkJob)
fn withSparkRJob(sparkRJob)
fn withSparkRJobMixin(sparkRJob)
fn withSparkSqlJob(sparkSqlJob)
fn withSparkSqlJobMixin(sparkSqlJob)
fn withStepId(stepId)
obj spec.forProvider.jobs.hadoopJob
fn withArchiveUris(archiveUris)
fn withArchiveUrisMixin(archiveUris)
fn withArgs(args)
fn withArgsMixin(args)
fn withFileUris(fileUris)
fn withFileUrisMixin(fileUris)
fn withJarFileUris(jarFileUris)
fn withJarFileUrisMixin(jarFileUris)
fn withLoggingConfig(loggingConfig)
fn withLoggingConfigMixin(loggingConfig)
fn withMainClass(mainClass)
fn withMainJarFileUri(mainJarFileUri)
fn withProperties(properties)
fn withPropertiesMixin(properties)
obj spec.forProvider.jobs.hadoopJob.loggingConfig
obj spec.forProvider.jobs.hiveJob
fn withContinueOnFailure(continueOnFailure)
fn withJarFileUris(jarFileUris)
fn withJarFileUrisMixin(jarFileUris)
fn withProperties(properties)
fn withPropertiesMixin(properties)
fn withQueryFileUri(queryFileUri)
fn withQueryList(queryList)
fn withQueryListMixin(queryList)
fn withScriptVariables(scriptVariables)
fn withScriptVariablesMixin(scriptVariables)
obj spec.forProvider.jobs.hiveJob.queryList
obj spec.forProvider.jobs.pigJob
fn withContinueOnFailure(continueOnFailure)
fn withJarFileUris(jarFileUris)
fn withJarFileUrisMixin(jarFileUris)
fn withLoggingConfig(loggingConfig)
fn withLoggingConfigMixin(loggingConfig)
fn withProperties(properties)
fn withPropertiesMixin(properties)
fn withQueryFileUri(queryFileUri)
fn withQueryList(queryList)
fn withQueryListMixin(queryList)
fn withScriptVariables(scriptVariables)
fn withScriptVariablesMixin(scriptVariables)
obj spec.forProvider.jobs.pigJob.loggingConfig
obj spec.forProvider.jobs.pigJob.queryList
obj spec.forProvider.jobs.prestoJob
fn withClientTags(clientTags)
fn withClientTagsMixin(clientTags)
fn withContinueOnFailure(continueOnFailure)
fn withLoggingConfig(loggingConfig)
fn withLoggingConfigMixin(loggingConfig)
fn withOutputFormat(outputFormat)
fn withProperties(properties)
fn withPropertiesMixin(properties)
fn withQueryFileUri(queryFileUri)
fn withQueryList(queryList)
fn withQueryListMixin(queryList)
obj spec.forProvider.jobs.prestoJob.loggingConfig
obj spec.forProvider.jobs.prestoJob.queryList
obj spec.forProvider.jobs.pysparkJob
fn withArchiveUris(archiveUris)
fn withArchiveUrisMixin(archiveUris)
fn withArgs(args)
fn withArgsMixin(args)
fn withFileUris(fileUris)
fn withFileUrisMixin(fileUris)
fn withJarFileUris(jarFileUris)
fn withJarFileUrisMixin(jarFileUris)
fn withLoggingConfig(loggingConfig)
fn withLoggingConfigMixin(loggingConfig)
fn withMainPythonFileUri(mainPythonFileUri)
fn withProperties(properties)
fn withPropertiesMixin(properties)
fn withPythonFileUris(pythonFileUris)
fn withPythonFileUrisMixin(pythonFileUris)
obj spec.forProvider.jobs.pysparkJob.loggingConfig
obj spec.forProvider.jobs.scheduling
obj spec.forProvider.jobs.sparkJob
fn withArchiveUris(archiveUris)
fn withArchiveUrisMixin(archiveUris)
fn withArgs(args)
fn withArgsMixin(args)
fn withFileUris(fileUris)
fn withFileUrisMixin(fileUris)
fn withJarFileUris(jarFileUris)
fn withJarFileUrisMixin(jarFileUris)
fn withLoggingConfig(loggingConfig)
fn withLoggingConfigMixin(loggingConfig)
fn withMainClass(mainClass)
fn withMainJarFileUri(mainJarFileUri)
fn withProperties(properties)
fn withPropertiesMixin(properties)
obj spec.forProvider.jobs.sparkJob.loggingConfig
obj spec.forProvider.jobs.sparkRJob
fn withArchiveUris(archiveUris)
fn withArchiveUrisMixin(archiveUris)
fn withArgs(args)
fn withArgsMixin(args)
fn withFileUris(fileUris)
fn withFileUrisMixin(fileUris)
fn withLoggingConfig(loggingConfig)
fn withLoggingConfigMixin(loggingConfig)
fn withMainRFileUri(mainRFileUri)
fn withProperties(properties)
fn withPropertiesMixin(properties)
obj spec.forProvider.jobs.sparkRJob.loggingConfig
obj spec.forProvider.jobs.sparkSqlJob
fn withJarFileUris(jarFileUris)
fn withJarFileUrisMixin(jarFileUris)
fn withLoggingConfig(loggingConfig)
fn withLoggingConfigMixin(loggingConfig)
fn withProperties(properties)
fn withPropertiesMixin(properties)
fn withQueryFileUri(queryFileUri)
fn withQueryList(queryList)
fn withQueryListMixin(queryList)
fn withScriptVariables(scriptVariables)
fn withScriptVariablesMixin(scriptVariables)
obj spec.forProvider.jobs.sparkSqlJob.loggingConfig
obj spec.forProvider.jobs.sparkSqlJob.queryList
obj spec.forProvider.parameters
obj spec.forProvider.placement
fn withClusterSelector(clusterSelector)
fn withClusterSelectorMixin(clusterSelector)
fn withManagedCluster(managedCluster)
fn withManagedClusterMixin(managedCluster)
obj spec.forProvider.placement.clusterSelector
obj spec.forProvider.placement.managedCluster
fn withClusterName(clusterName)
fn withConfig(config)
fn withConfigMixin(config)
fn withLabels(labels)
fn withLabelsMixin(labels)
obj spec.forProvider.placement.managedCluster.config
fn withAutoscalingConfig(autoscalingConfig)
fn withAutoscalingConfigMixin(autoscalingConfig)
fn withEncryptionConfig(encryptionConfig)
fn withEncryptionConfigMixin(encryptionConfig)
fn withEndpointConfig(endpointConfig)
fn withEndpointConfigMixin(endpointConfig)
fn withGceClusterConfig(gceClusterConfig)
fn withGceClusterConfigMixin(gceClusterConfig)
fn withInitializationActions(initializationActions)
fn withInitializationActionsMixin(initializationActions)
fn withLifecycleConfig(lifecycleConfig)
fn withLifecycleConfigMixin(lifecycleConfig)
fn withMasterConfig(masterConfig)
fn withMasterConfigMixin(masterConfig)
fn withSecondaryWorkerConfig(secondaryWorkerConfig)
fn withSecondaryWorkerConfigMixin(secondaryWorkerConfig)
fn withSecurityConfig(securityConfig)
fn withSecurityConfigMixin(securityConfig)
fn withSoftwareConfig(softwareConfig)
fn withSoftwareConfigMixin(softwareConfig)
fn withStagingBucket(stagingBucket)
fn withTempBucket(tempBucket)
fn withWorkerConfig(workerConfig)
fn withWorkerConfigMixin(workerConfig)
obj spec.forProvider.placement.managedCluster.config.autoscalingConfig
obj spec.forProvider.placement.managedCluster.config.encryptionConfig
obj spec.forProvider.placement.managedCluster.config.endpointConfig
obj spec.forProvider.placement.managedCluster.config.gceClusterConfig
fn withInternalIpOnly(internalIpOnly)
fn withMetadata(metadata)
fn withMetadataMixin(metadata)
fn withNetwork(network)
fn withNodeGroupAffinity(nodeGroupAffinity)
fn withNodeGroupAffinityMixin(nodeGroupAffinity)
fn withPrivateIpv6GoogleAccess(privateIpv6GoogleAccess)
fn withReservationAffinity(reservationAffinity)
fn withReservationAffinityMixin(reservationAffinity)
fn withServiceAccount(serviceAccount)
fn withServiceAccountScopes(serviceAccountScopes)
fn withServiceAccountScopesMixin(serviceAccountScopes)
fn withShieldedInstanceConfig(shieldedInstanceConfig)
fn withShieldedInstanceConfigMixin(shieldedInstanceConfig)
fn withSubnetwork(subnetwork)
fn withTags(tags)
fn withTagsMixin(tags)
fn withZone(zone)
obj spec.forProvider.placement.managedCluster.config.gceClusterConfig.nodeGroupAffinity
obj spec.forProvider.placement.managedCluster.config.gceClusterConfig.reservationAffinity
obj spec.forProvider.placement.managedCluster.config.gceClusterConfig.shieldedInstanceConfig
obj spec.forProvider.placement.managedCluster.config.initializationActions
obj spec.forProvider.placement.managedCluster.config.lifecycleConfig
obj spec.forProvider.placement.managedCluster.config.masterConfig
fn withAccelerators(accelerators)
fn withAcceleratorsMixin(accelerators)
fn withDiskConfig(diskConfig)
fn withDiskConfigMixin(diskConfig)
fn withImage(image)
fn withMachineType(machineType)
fn withMinCpuPlatform(minCpuPlatform)
fn withNumInstances(numInstances)
fn withPreemptibility(preemptibility)
obj spec.forProvider.placement.managedCluster.config.masterConfig.accelerators
obj spec.forProvider.placement.managedCluster.config.masterConfig.diskConfig
obj spec.forProvider.placement.managedCluster.config.secondaryWorkerConfig
fn withAccelerators(accelerators)
fn withAcceleratorsMixin(accelerators)
fn withDiskConfig(diskConfig)
fn withDiskConfigMixin(diskConfig)
fn withImage(image)
fn withMachineType(machineType)
fn withMinCpuPlatform(minCpuPlatform)
fn withNumInstances(numInstances)
fn withPreemptibility(preemptibility)
obj spec.forProvider.placement.managedCluster.config.secondaryWorkerConfig.accelerators
obj spec.forProvider.placement.managedCluster.config.secondaryWorkerConfig.diskConfig
obj spec.forProvider.placement.managedCluster.config.securityConfig
fn withKerberosConfig(kerberosConfig)
fn withKerberosConfigMixin(kerberosConfig)
obj spec.forProvider.placement.managedCluster.config.securityConfig.kerberosConfig
fn withCrossRealmTrustAdminServer(crossRealmTrustAdminServer)
fn withCrossRealmTrustKdc(crossRealmTrustKdc)
fn withCrossRealmTrustRealm(crossRealmTrustRealm)
fn withCrossRealmTrustSharedPassword(crossRealmTrustSharedPassword)
fn withEnableKerberos(enableKerberos)
fn withKdcDbKey(kdcDbKey)
fn withKeyPassword(keyPassword)
fn withKeystore(keystore)
fn withKeystorePassword(keystorePassword)
fn withKmsKey(kmsKey)
fn withRealm(realm)
fn withRootPrincipalPassword(rootPrincipalPassword)
fn withTgtLifetimeHours(tgtLifetimeHours)
fn withTruststore(truststore)
fn withTruststorePassword(truststorePassword)
obj spec.forProvider.placement.managedCluster.config.softwareConfig
obj spec.forProvider.placement.managedCluster.config.workerConfig
fn withAccelerators(accelerators)
fn withAcceleratorsMixin(accelerators)
fn withDiskConfig(diskConfig)
fn withDiskConfigMixin(diskConfig)
fn withImage(image)
fn withMachineType(machineType)
fn withMinCpuPlatform(minCpuPlatform)
fn withNumInstances(numInstances)
fn withPreemptibility(preemptibility)
obj spec.forProvider.placement.managedCluster.config.workerConfig.accelerators
obj spec.forProvider.placement.managedCluster.config.workerConfig.diskConfig
obj spec.initProvider
fn withDagTimeout(dagTimeout)
fn withJobs(jobs)
fn withJobsMixin(jobs)
fn withLabels(labels)
fn withLabelsMixin(labels)
fn withParameters(parameters)
fn withParametersMixin(parameters)
fn withPlacement(placement)
fn withPlacementMixin(placement)
fn withProject(project)
fn withVersion(version)
obj spec.initProvider.jobs
fn withHadoopJob(hadoopJob)
fn withHadoopJobMixin(hadoopJob)
fn withHiveJob(hiveJob)
fn withHiveJobMixin(hiveJob)
fn withLabels(labels)
fn withLabelsMixin(labels)
fn withPigJob(pigJob)
fn withPigJobMixin(pigJob)
fn withPrerequisiteStepIds(prerequisiteStepIds)
fn withPrerequisiteStepIdsMixin(prerequisiteStepIds)
fn withPrestoJob(prestoJob)
fn withPrestoJobMixin(prestoJob)
fn withPysparkJob(pysparkJob)
fn withPysparkJobMixin(pysparkJob)
fn withScheduling(scheduling)
fn withSchedulingMixin(scheduling)
fn withSparkJob(sparkJob)
fn withSparkJobMixin(sparkJob)
fn withSparkRJob(sparkRJob)
fn withSparkRJobMixin(sparkRJob)
fn withSparkSqlJob(sparkSqlJob)
fn withSparkSqlJobMixin(sparkSqlJob)
fn withStepId(stepId)
obj spec.initProvider.jobs.hadoopJob
fn withArchiveUris(archiveUris)
fn withArchiveUrisMixin(archiveUris)
fn withArgs(args)
fn withArgsMixin(args)
fn withFileUris(fileUris)
fn withFileUrisMixin(fileUris)
fn withJarFileUris(jarFileUris)
fn withJarFileUrisMixin(jarFileUris)
fn withLoggingConfig(loggingConfig)
fn withLoggingConfigMixin(loggingConfig)
fn withMainClass(mainClass)
fn withMainJarFileUri(mainJarFileUri)
fn withProperties(properties)
fn withPropertiesMixin(properties)
obj spec.initProvider.jobs.hadoopJob.loggingConfig
obj spec.initProvider.jobs.hiveJob
fn withContinueOnFailure(continueOnFailure)
fn withJarFileUris(jarFileUris)
fn withJarFileUrisMixin(jarFileUris)
fn withProperties(properties)
fn withPropertiesMixin(properties)
fn withQueryFileUri(queryFileUri)
fn withQueryList(queryList)
fn withQueryListMixin(queryList)
fn withScriptVariables(scriptVariables)
fn withScriptVariablesMixin(scriptVariables)
obj spec.initProvider.jobs.hiveJob.queryList
obj spec.initProvider.jobs.pigJob
fn withContinueOnFailure(continueOnFailure)
fn withJarFileUris(jarFileUris)
fn withJarFileUrisMixin(jarFileUris)
fn withLoggingConfig(loggingConfig)
fn withLoggingConfigMixin(loggingConfig)
fn withProperties(properties)
fn withPropertiesMixin(properties)
fn withQueryFileUri(queryFileUri)
fn withQueryList(queryList)
fn withQueryListMixin(queryList)
fn withScriptVariables(scriptVariables)
fn withScriptVariablesMixin(scriptVariables)
obj spec.initProvider.jobs.pigJob.loggingConfig
obj spec.initProvider.jobs.pigJob.queryList
obj spec.initProvider.jobs.prestoJob
fn withClientTags(clientTags)
fn withClientTagsMixin(clientTags)
fn withContinueOnFailure(continueOnFailure)
fn withLoggingConfig(loggingConfig)
fn withLoggingConfigMixin(loggingConfig)
fn withOutputFormat(outputFormat)
fn withProperties(properties)
fn withPropertiesMixin(properties)
fn withQueryFileUri(queryFileUri)
fn withQueryList(queryList)
fn withQueryListMixin(queryList)
obj spec.initProvider.jobs.prestoJob.loggingConfig
obj spec.initProvider.jobs.prestoJob.queryList
obj spec.initProvider.jobs.pysparkJob
fn withArchiveUris(archiveUris)
fn withArchiveUrisMixin(archiveUris)
fn withArgs(args)
fn withArgsMixin(args)
fn withFileUris(fileUris)
fn withFileUrisMixin(fileUris)
fn withJarFileUris(jarFileUris)
fn withJarFileUrisMixin(jarFileUris)
fn withLoggingConfig(loggingConfig)
fn withLoggingConfigMixin(loggingConfig)
fn withMainPythonFileUri(mainPythonFileUri)
fn withProperties(properties)
fn withPropertiesMixin(properties)
fn withPythonFileUris(pythonFileUris)
fn withPythonFileUrisMixin(pythonFileUris)
obj spec.initProvider.jobs.pysparkJob.loggingConfig
obj spec.initProvider.jobs.scheduling
obj spec.initProvider.jobs.sparkJob
fn withArchiveUris(archiveUris)
fn withArchiveUrisMixin(archiveUris)
fn withArgs(args)
fn withArgsMixin(args)
fn withFileUris(fileUris)
fn withFileUrisMixin(fileUris)
fn withJarFileUris(jarFileUris)
fn withJarFileUrisMixin(jarFileUris)
fn withLoggingConfig(loggingConfig)
fn withLoggingConfigMixin(loggingConfig)
fn withMainClass(mainClass)
fn withMainJarFileUri(mainJarFileUri)
fn withProperties(properties)
fn withPropertiesMixin(properties)
obj spec.initProvider.jobs.sparkJob.loggingConfig
obj spec.initProvider.jobs.sparkRJob
fn withArchiveUris(archiveUris)
fn withArchiveUrisMixin(archiveUris)
fn withArgs(args)
fn withArgsMixin(args)
fn withFileUris(fileUris)
fn withFileUrisMixin(fileUris)
fn withLoggingConfig(loggingConfig)
fn withLoggingConfigMixin(loggingConfig)
fn withMainRFileUri(mainRFileUri)
fn withProperties(properties)
fn withPropertiesMixin(properties)
obj spec.initProvider.jobs.sparkRJob.loggingConfig
obj spec.initProvider.jobs.sparkSqlJob
fn withJarFileUris(jarFileUris)
fn withJarFileUrisMixin(jarFileUris)
fn withLoggingConfig(loggingConfig)
fn withLoggingConfigMixin(loggingConfig)
fn withProperties(properties)
fn withPropertiesMixin(properties)
fn withQueryFileUri(queryFileUri)
fn withQueryList(queryList)
fn withQueryListMixin(queryList)
fn withScriptVariables(scriptVariables)
fn withScriptVariablesMixin(scriptVariables)
obj spec.initProvider.jobs.sparkSqlJob.loggingConfig
obj spec.initProvider.jobs.sparkSqlJob.queryList
obj spec.initProvider.parameters
obj spec.initProvider.placement
fn withClusterSelector(clusterSelector)
fn withClusterSelectorMixin(clusterSelector)
fn withManagedCluster(managedCluster)
fn withManagedClusterMixin(managedCluster)
obj spec.initProvider.placement.clusterSelector
obj spec.initProvider.placement.managedCluster
fn withClusterName(clusterName)
fn withConfig(config)
fn withConfigMixin(config)
fn withLabels(labels)
fn withLabelsMixin(labels)
obj spec.initProvider.placement.managedCluster.config
fn withAutoscalingConfig(autoscalingConfig)
fn withAutoscalingConfigMixin(autoscalingConfig)
fn withEncryptionConfig(encryptionConfig)
fn withEncryptionConfigMixin(encryptionConfig)
fn withEndpointConfig(endpointConfig)
fn withEndpointConfigMixin(endpointConfig)
fn withGceClusterConfig(gceClusterConfig)
fn withGceClusterConfigMixin(gceClusterConfig)
fn withInitializationActions(initializationActions)
fn withInitializationActionsMixin(initializationActions)
fn withLifecycleConfig(lifecycleConfig)
fn withLifecycleConfigMixin(lifecycleConfig)
fn withMasterConfig(masterConfig)
fn withMasterConfigMixin(masterConfig)
fn withSecondaryWorkerConfig(secondaryWorkerConfig)
fn withSecondaryWorkerConfigMixin(secondaryWorkerConfig)
fn withSecurityConfig(securityConfig)
fn withSecurityConfigMixin(securityConfig)
fn withSoftwareConfig(softwareConfig)
fn withSoftwareConfigMixin(softwareConfig)
fn withStagingBucket(stagingBucket)
fn withTempBucket(tempBucket)
fn withWorkerConfig(workerConfig)
fn withWorkerConfigMixin(workerConfig)
obj spec.initProvider.placement.managedCluster.config.autoscalingConfig
obj spec.initProvider.placement.managedCluster.config.encryptionConfig
obj spec.initProvider.placement.managedCluster.config.endpointConfig
obj spec.initProvider.placement.managedCluster.config.gceClusterConfig
fn withInternalIpOnly(internalIpOnly)
fn withMetadata(metadata)
fn withMetadataMixin(metadata)
fn withNetwork(network)
fn withNodeGroupAffinity(nodeGroupAffinity)
fn withNodeGroupAffinityMixin(nodeGroupAffinity)
fn withPrivateIpv6GoogleAccess(privateIpv6GoogleAccess)
fn withReservationAffinity(reservationAffinity)
fn withReservationAffinityMixin(reservationAffinity)
fn withServiceAccount(serviceAccount)
fn withServiceAccountScopes(serviceAccountScopes)
fn withServiceAccountScopesMixin(serviceAccountScopes)
fn withShieldedInstanceConfig(shieldedInstanceConfig)
fn withShieldedInstanceConfigMixin(shieldedInstanceConfig)
fn withSubnetwork(subnetwork)
fn withTags(tags)
fn withTagsMixin(tags)
fn withZone(zone)
obj spec.initProvider.placement.managedCluster.config.gceClusterConfig.nodeGroupAffinity
obj spec.initProvider.placement.managedCluster.config.gceClusterConfig.reservationAffinity
obj spec.initProvider.placement.managedCluster.config.gceClusterConfig.shieldedInstanceConfig
obj spec.initProvider.placement.managedCluster.config.initializationActions
obj spec.initProvider.placement.managedCluster.config.lifecycleConfig
obj spec.initProvider.placement.managedCluster.config.masterConfig
fn withAccelerators(accelerators)
fn withAcceleratorsMixin(accelerators)
fn withDiskConfig(diskConfig)
fn withDiskConfigMixin(diskConfig)
fn withImage(image)
fn withMachineType(machineType)
fn withMinCpuPlatform(minCpuPlatform)
fn withNumInstances(numInstances)
fn withPreemptibility(preemptibility)
obj spec.initProvider.placement.managedCluster.config.masterConfig.accelerators
obj spec.initProvider.placement.managedCluster.config.masterConfig.diskConfig
obj spec.initProvider.placement.managedCluster.config.secondaryWorkerConfig
fn withAccelerators(accelerators)
fn withAcceleratorsMixin(accelerators)
fn withDiskConfig(diskConfig)
fn withDiskConfigMixin(diskConfig)
fn withImage(image)
fn withMachineType(machineType)
fn withMinCpuPlatform(minCpuPlatform)
fn withNumInstances(numInstances)
fn withPreemptibility(preemptibility)
obj spec.initProvider.placement.managedCluster.config.secondaryWorkerConfig.accelerators
obj spec.initProvider.placement.managedCluster.config.secondaryWorkerConfig.diskConfig
obj spec.initProvider.placement.managedCluster.config.securityConfig
fn withKerberosConfig(kerberosConfig)
fn withKerberosConfigMixin(kerberosConfig)
obj spec.initProvider.placement.managedCluster.config.securityConfig.kerberosConfig
fn withCrossRealmTrustAdminServer(crossRealmTrustAdminServer)
fn withCrossRealmTrustKdc(crossRealmTrustKdc)
fn withCrossRealmTrustRealm(crossRealmTrustRealm)
fn withCrossRealmTrustSharedPassword(crossRealmTrustSharedPassword)
fn withEnableKerberos(enableKerberos)
fn withKdcDbKey(kdcDbKey)
fn withKeyPassword(keyPassword)
fn withKeystore(keystore)
fn withKeystorePassword(keystorePassword)
fn withKmsKey(kmsKey)
fn withRealm(realm)
fn withRootPrincipalPassword(rootPrincipalPassword)
fn withTgtLifetimeHours(tgtLifetimeHours)
fn withTruststore(truststore)
fn withTruststorePassword(truststorePassword)
obj spec.initProvider.placement.managedCluster.config.softwareConfig
obj spec.initProvider.placement.managedCluster.config.workerConfig
fn withAccelerators(accelerators)
fn withAcceleratorsMixin(accelerators)
fn withDiskConfig(diskConfig)
fn withDiskConfigMixin(diskConfig)
fn withImage(image)
fn withMachineType(machineType)
fn withMinCpuPlatform(minCpuPlatform)
fn withNumInstances(numInstances)
fn withPreemptibility(preemptibility)
obj spec.initProvider.placement.managedCluster.config.workerConfig.accelerators
obj spec.initProvider.placement.managedCluster.config.workerConfig.diskConfig
obj spec.providerConfigRef
obj spec.providerRef
obj spec.publishConnectionDetailsTo
obj spec.writeConnectionSecretToRef
Fields
fn new
new(name)
new returns an instance of WorkflowTemplate
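A minimal usage sketch. The import path and the project/location values below are illustrative assumptions, not part of this package; adjust them to wherever the library is vendored in your setup.

```jsonnet
// Illustrative import path - substitute the location where this library is vendored.
local gcp = import 'github.com/jsonnet-libs/gcp-libsonnet/main.libsonnet';
local workflowTemplate = gcp.dataproc.v1beta1.workflowTemplate;

// new() sets apiVersion, kind and metadata.name; further configuration is merged in with `+`.
workflowTemplate.new('sample-template')
+ workflowTemplate.spec.forProvider.withLocation('us-central1')
+ workflowTemplate.spec.forProvider.withProject('my-gcp-project')
```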
obj metadata
"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."
fn metadata.withAnnotations
withAnnotations(annotations)
"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"
fn metadata.withAnnotationsMixin
withAnnotationsMixin(annotations)
"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"
Note: This function appends passed data to existing values
fn metadata.withClusterName
withClusterName(clusterName)
"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."
fn metadata.withCreationTimestamp
withCreationTimestamp(creationTimestamp)
"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."
fn metadata.withDeletionGracePeriodSeconds
withDeletionGracePeriodSeconds(deletionGracePeriodSeconds)
"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."
fn metadata.withDeletionTimestamp
withDeletionTimestamp(deletionTimestamp)
"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."
fn metadata.withFinalizers
withFinalizers(finalizers)
"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."
fn metadata.withFinalizersMixin
withFinalizersMixin(finalizers)
"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."
Note: This function appends passed data to existing values
fn metadata.withGenerateName
withGenerateName(generateName)
"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"
fn metadata.withGeneration
withGeneration(generation)
"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."
fn metadata.withLabels
withLabels(labels)
"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"
fn metadata.withLabelsMixin
withLabelsMixin(labels)
"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"
Note: This function appends passed data to existing values
fn metadata.withName
withName(name)
"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"
fn metadata.withNamespace
withNamespace(namespace)
"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"
fn metadata.withOwnerReferences
withOwnerReferences(ownerReferences)
"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."
fn metadata.withOwnerReferencesMixin
withOwnerReferencesMixin(ownerReferences)
"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."
Note: This function appends passed data to existing values
fn metadata.withResourceVersion
withResourceVersion(resourceVersion)
"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"
fn metadata.withSelfLink
withSelfLink(selfLink)
"SelfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."
fn metadata.withUid
withUid(uid)
"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"
obj spec
"WorkflowTemplateSpec defines the desired state of WorkflowTemplate"
fn spec.withDeletionPolicy
withDeletionPolicy(deletionPolicy)
"DeletionPolicy specifies what will happen to the underlying external when this managed resource is deleted - either \"Delete\" or \"Orphan\" the external resource. This field is planned to be deprecated in favor of the ManagementPolicies field in a future release. Currently, both could be set independently and non-default values would be honored if the feature flag is enabled. See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223"
fn spec.withManagementPolicies
withManagementPolicies(managementPolicies)
"THIS IS AN ALPHA FIELD. Do not use it in production. It is not honored unless the relevant Crossplane feature flag is enabled, and may be changed or removed without notice. ManagementPolicies specify the array of actions Crossplane is allowed to take on the managed and external resources. This field is planned to replace the DeletionPolicy field in a future release. Currently, both could be set independently and non-default values would be honored if the feature flag is enabled. If both are custom, the DeletionPolicy field will be ignored. See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md"
fn spec.withManagementPoliciesMixin
withManagementPoliciesMixin(managementPolicies)
"THIS IS AN ALPHA FIELD. Do not use it in production. It is not honored unless the relevant Crossplane feature flag is enabled, and may be changed or removed without notice. ManagementPolicies specify the array of actions Crossplane is allowed to take on the managed and external resources. This field is planned to replace the DeletionPolicy field in a future release. Currently, both could be set independently and non-default values would be honored if the feature flag is enabled. If both are custom, the DeletionPolicy field will be ignored. See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md"
Note: This function appends passed data to existing values
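A brief sketch combining both spec-level fields (ManagementPolicies only takes effect when the relevant Crossplane feature flag is enabled, as noted above):

```jsonnet
workflowTemplate.new('sample-template')
+ workflowTemplate.spec.withDeletionPolicy('Orphan')
+ workflowTemplate.spec.withManagementPolicies(['Observe'])
```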
obj spec.forProvider
fn spec.forProvider.withDagTimeout
withDagTimeout(dagTimeout)
"(Beta only) Optional. Timeout duration for the DAG of jobs. You can use \"s\", \"m\", \"h\", and \"d\" suffixes for second, minute, hour, and day duration values, respectively. The timeout duration must be from 10 minutes (\"10m\") to 24 hours (\"24h\" or \"1d\"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a (/dataproc/docs/concepts/workflows/using-workflows#configuring_or_selecting_a_cluster), the cluster is deleted."
fn spec.forProvider.withJobs
withJobs(jobs)
"Required. The Directed Acyclic Graph of Jobs to submit."
fn spec.forProvider.withJobsMixin
withJobsMixin(jobs)
"Required. The Directed Acyclic Graph of Jobs to submit."
Note: This function appends passed data to existing values
fn spec.forProvider.withLabels
withLabels(labels)
"Optional. The labels to associate with this cluster. Label keys must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: {0,63} No more than 32 labels can be associated with a given cluster."
fn spec.forProvider.withLabelsMixin
withLabelsMixin(labels)
"Optional. The labels to associate with this cluster. Label keys must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: {0,63} No more than 32 labels can be associated with a given cluster."
Note: This function appends passed data to existing values
fn spec.forProvider.withLocation
withLocation(location)
"The location for the resource"
fn spec.forProvider.withParameters
withParameters(parameters)
"Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated."
fn spec.forProvider.withParametersMixin
withParametersMixin(parameters)
"Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated."
Note: This function appends passed data to existing values
fn spec.forProvider.withPlacement
withPlacement(placement)
"Required. WorkflowTemplate scheduling information."
fn spec.forProvider.withPlacementMixin
withPlacementMixin(placement)
"Required. WorkflowTemplate scheduling information."
Note: This function appends passed data to existing values
fn spec.forProvider.withProject
withProject(project)
"The project for the resource"
fn spec.forProvider.withVersion
withVersion(version)
"Optional. Used to perform a consistent read-modify-write. This field should be left blank for a CreateWorkflowTemplate request. It is required for an UpdateWorkflowTemplate request, and must match the current server version. A typical update template flow would fetch the current template with a GetWorkflowTemplate request, which will return the current template with the version field filled in with the current server version. The user updates other fields in the template, then returns it as part of the UpdateWorkflowTemplate request."
obj spec.forProvider.jobs
"Required. The Directed Acyclic Graph of Jobs to submit."
fn spec.forProvider.jobs.withHadoopJob
withHadoopJob(hadoopJob)
"Optional. Job is a Hadoop job."
fn spec.forProvider.jobs.withHadoopJobMixin
withHadoopJobMixin(hadoopJob)
"Optional. Job is a Hadoop job."
Note: This function appends passed data to existing values
fn spec.forProvider.jobs.withHiveJob
withHiveJob(hiveJob)
"Optional. Job is a Hive job."
fn spec.forProvider.jobs.withHiveJobMixin
withHiveJobMixin(hiveJob)
"Optional. Job is a Hive job."
Note: This function appends passed data to existing values
fn spec.forProvider.jobs.withLabels
withLabels(labels)
"Optional. The labels to associate with this job. Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: {0,63} No more than 32 labels can be associated with a given job."
fn spec.forProvider.jobs.withLabelsMixin
withLabelsMixin(labels)
"Optional. The labels to associate with this job. Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: {0,63} No more than 32 labels can be associated with a given job."
Note: This function appends passed data to existing values
fn spec.forProvider.jobs.withPigJob
withPigJob(pigJob)
"Optional. Job is a Pig job."
fn spec.forProvider.jobs.withPigJobMixin
withPigJobMixin(pigJob)
"Optional. Job is a Pig job."
Note: This function appends passed data to existing values
fn spec.forProvider.jobs.withPrerequisiteStepIds
withPrerequisiteStepIds(prerequisiteStepIds)
"Optional. The optional list of prerequisite job step_ids. If not specified, the job will start at the beginning of workflow."
fn spec.forProvider.jobs.withPrerequisiteStepIdsMixin
withPrerequisiteStepIdsMixin(prerequisiteStepIds)
"Optional. The optional list of prerequisite job step_ids. If not specified, the job will start at the beginning of workflow."
Note: This function appends passed data to existing values
fn spec.forProvider.jobs.withPrestoJob
withPrestoJob(prestoJob)
"Optional. Job is a Presto job."
fn spec.forProvider.jobs.withPrestoJobMixin
withPrestoJobMixin(prestoJob)
"Optional. Job is a Presto job."
Note: This function appends passed data to existing values
fn spec.forProvider.jobs.withPysparkJob
withPysparkJob(pysparkJob)
"Optional. Job is a PySpark job."
fn spec.forProvider.jobs.withPysparkJobMixin
withPysparkJobMixin(pysparkJob)
"Optional. Job is a PySpark job."
Note: This function appends passed data to existing values
fn spec.forProvider.jobs.withScheduling
withScheduling(scheduling)
"Optional. Job scheduling configuration."
fn spec.forProvider.jobs.withSchedulingMixin
withSchedulingMixin(scheduling)
"Optional. Job scheduling configuration."
Note: This function appends passed data to existing values
fn spec.forProvider.jobs.withSparkJob
withSparkJob(sparkJob)
"Optional. Job is a Spark job."
fn spec.forProvider.jobs.withSparkJobMixin
withSparkJobMixin(sparkJob)
"Optional. Job is a Spark job."
Note: This function appends passed data to existing values
fn spec.forProvider.jobs.withSparkRJob
withSparkRJob(sparkRJob)
"Optional. Job is a SparkR job."
fn spec.forProvider.jobs.withSparkRJobMixin
withSparkRJobMixin(sparkRJob)
"Optional. Job is a SparkR job."
Note: This function appends passed data to existing values
fn spec.forProvider.jobs.withSparkSqlJob
withSparkSqlJob(sparkSqlJob)
"Optional. Job is a SparkSql job."
fn spec.forProvider.jobs.withSparkSqlJobMixin
withSparkSqlJobMixin(sparkSqlJob)
"Optional. Job is a SparkSql job."
Note: This function appends passed data to existing values
fn spec.forProvider.jobs.withStepId
withStepId(stepId)
"Required. The step id. The id must be unique among all jobs within the template. The step id is used as prefix for job id, as job goog-dataproc-workflow-step-id label, and in field from other steps. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters."
obj spec.forProvider.jobs.hadoopJob
"Optional. Job is a Hadoop job."
fn spec.forProvider.jobs.hadoopJob.withArchiveUris
withArchiveUris(archiveUris)
"Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip."
fn spec.forProvider.jobs.hadoopJob.withArchiveUrisMixin
withArchiveUrisMixin(archiveUris)
"Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip."
Note: This function appends passed data to existing values
fn spec.forProvider.jobs.hadoopJob.withArgs
withArgs(args)
"Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission."
fn spec.forProvider.jobs.hadoopJob.withArgsMixin
withArgsMixin(args)
"Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission."
Note: This function appends passed data to existing values
fn spec.forProvider.jobs.hadoopJob.withFileUris
withFileUris(fileUris)
"Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks."
fn spec.forProvider.jobs.hadoopJob.withFileUrisMixin
withFileUrisMixin(fileUris)
"Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks."
Note: This function appends passed data to existing values
fn spec.forProvider.jobs.hadoopJob.withJarFileUris
withJarFileUris(jarFileUris)
"Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH."
fn spec.forProvider.jobs.hadoopJob.withJarFileUrisMixin
withJarFileUrisMixin(jarFileUris)
"Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH."
Note: This function appends passed data to existing values
fn spec.forProvider.jobs.hadoopJob.withLoggingConfig
withLoggingConfig(loggingConfig)
"Optional. The runtime log config for job execution."
fn spec.forProvider.jobs.hadoopJob.withLoggingConfigMixin
withLoggingConfigMixin(loggingConfig)
"Optional. The runtime log config for job execution."
Note: This function appends passed data to existing values
fn spec.forProvider.jobs.hadoopJob.withMainClass
withMainClass(mainClass)
"The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jar_file_uris."
fn spec.forProvider.jobs.hadoopJob.withMainJarFileUri
withMainJarFileUri(mainJarFileUri)
"The HCFS URI of the jar file that contains the main class."
fn spec.forProvider.jobs.hadoopJob.withProperties
withProperties(properties)
"Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten."
fn spec.forProvider.jobs.hadoopJob.withPropertiesMixin
withPropertiesMixin(properties)
"Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten."
Note: This function appends passed data to existing values
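A Hadoop step sketch under the same relative-fragment assumption as above; the jar URI, args, and log levels are placeholders. The result is one element of the array passed to spec.forProvider.withJobs().

```jsonnet
local jobs = workflowTemplate.spec.forProvider.jobs;

jobs.withStepId('teragen')
+ jobs.hadoopJob.withMainJarFileUri('file:///usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar')
+ jobs.hadoopJob.withArgs(['teragen', '1000', 'hdfs:///gen/'])
// Raise driver logging for the root logger of this step.
+ jobs.hadoopJob.loggingConfig.withDriverLogLevels({ root: 'INFO' })
```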
obj spec.forProvider.jobs.hadoopJob.loggingConfig
"Optional. The runtime log config for job execution."
fn spec.forProvider.jobs.hadoopJob.loggingConfig.withDriverLogLevels
withDriverLogLevels(driverLogLevels)
"The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'"
fn spec.forProvider.jobs.hadoopJob.loggingConfig.withDriverLogLevelsMixin
withDriverLogLevelsMixin(driverLogLevels)
"The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'"
Note: This function appends passed data to existing values
obj spec.forProvider.jobs.hiveJob
"Optional. Job is a Hive job."
fn spec.forProvider.jobs.hiveJob.withContinueOnFailure
withContinueOnFailure(continueOnFailure)
"Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries."
fn spec.forProvider.jobs.hiveJob.withJarFileUris
withJarFileUris(jarFileUris)
"Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH."
fn spec.forProvider.jobs.hiveJob.withJarFileUrisMixin
withJarFileUrisMixin(jarFileUris)
"Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH."
Note: This function appends passed data to existing values
fn spec.forProvider.jobs.hiveJob.withProperties
withProperties(properties)
"Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten."
fn spec.forProvider.jobs.hiveJob.withPropertiesMixin
withPropertiesMixin(properties)
"Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten."
Note: This function appends passed data to existing values
fn spec.forProvider.jobs.hiveJob.withQueryFileUri
withQueryFileUri(queryFileUri)
"The HCFS URI of the script that contains SQL queries."
fn spec.forProvider.jobs.hiveJob.withQueryList
withQueryList(queryList)
"A list of queries."
fn spec.forProvider.jobs.hiveJob.withQueryListMixin
withQueryListMixin(queryList)
"A list of queries."
Note: This function appends passed data to existing values
fn spec.forProvider.jobs.hiveJob.withScriptVariables
withScriptVariables(scriptVariables)
"Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET name=\"value\";)."
fn spec.forProvider.jobs.hiveJob.withScriptVariablesMixin
withScriptVariablesMixin(scriptVariables)
"Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET name=\"value\";)."
Note: This function appends passed data to existing values
obj spec.forProvider.jobs.hiveJob.queryList
"A list of queries."
fn spec.forProvider.jobs.hiveJob.queryList.withQueries
withQueries(queries)
"Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": } }"
fn spec.forProvider.jobs.hiveJob.queryList.withQueriesMixin
withQueriesMixin(queries)
"Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": } }"
Note: This function appends passed data to existing values
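A sketch of an inline Hive step that uses queryList instead of queryFileUri (same assumption as above; queries and variables are placeholders):

```jsonnet
local jobs = workflowTemplate.spec.forProvider.jobs;

jobs.withStepId('hive-inline')
+ jobs.hiveJob.withContinueOnFailure(true)
+ jobs.hiveJob.queryList.withQueries([
    'SHOW DATABASES',
    'SHOW TABLES IN default',
  ])
+ jobs.hiveJob.withScriptVariables({ ds: '2024-01-01' })
```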
obj spec.forProvider.jobs.pigJob
"Optional. Job is a Pig job."
fn spec.forProvider.jobs.pigJob.withContinueOnFailure
withContinueOnFailure(continueOnFailure)
"Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries."
fn spec.forProvider.jobs.pigJob.withJarFileUris
withJarFileUris(jarFileUris)
"Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH."
fn spec.forProvider.jobs.pigJob.withJarFileUrisMixin
withJarFileUrisMixin(jarFileUris)
"Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH."
Note: This function appends passed data to existing values
fn spec.forProvider.jobs.pigJob.withLoggingConfig
withLoggingConfig(loggingConfig)
"Optional. The runtime log config for job execution."
fn spec.forProvider.jobs.pigJob.withLoggingConfigMixin
withLoggingConfigMixin(loggingConfig)
"Optional. The runtime log config for job execution."
Note: This function appends passed data to existing values
fn spec.forProvider.jobs.pigJob.withProperties
withProperties(properties)
"Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten."
fn spec.forProvider.jobs.pigJob.withPropertiesMixin
withPropertiesMixin(properties)
"Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten."
Note: This function appends passed data to existing values
fn spec.forProvider.jobs.pigJob.withQueryFileUri
withQueryFileUri(queryFileUri)
"The HCFS URI of the script that contains SQL queries."
fn spec.forProvider.jobs.pigJob.withQueryList
withQueryList(queryList)
"A list of queries."
fn spec.forProvider.jobs.pigJob.withQueryListMixin
withQueryListMixin(queryList)
"A list of queries."
Note: This function appends passed data to existing values
fn spec.forProvider.jobs.pigJob.withScriptVariables
withScriptVariables(scriptVariables)
"Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET name=\"value\";)."
fn spec.forProvider.jobs.pigJob.withScriptVariablesMixin
withScriptVariablesMixin(scriptVariables)
"Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET name=\"value\";)."
Note: This function appends passed data to existing values
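A Pig step sketch (same assumption; URIs and script variables are placeholders):

```jsonnet
local jobs = workflowTemplate.spec.forProvider.jobs;

jobs.withStepId('pig-clean')
+ jobs.pigJob.withQueryFileUri('gs://my-bucket/clean.pig')
+ jobs.pigJob.withScriptVariables({ input: 'gs://my-bucket/raw/', output: 'gs://my-bucket/clean/' })
+ jobs.pigJob.withContinueOnFailure(false)
```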
obj spec.forProvider.jobs.pigJob.loggingConfig
"Optional. The runtime log config for job execution."
fn spec.forProvider.jobs.pigJob.loggingConfig.withDriverLogLevels
withDriverLogLevels(driverLogLevels)
"The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'"
fn spec.forProvider.jobs.pigJob.loggingConfig.withDriverLogLevelsMixin
withDriverLogLevelsMixin(driverLogLevels)
"The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'"
Note: This function appends passed data to existing values
obj spec.forProvider.jobs.pigJob.queryList
"A list of queries."
fn spec.forProvider.jobs.pigJob.queryList.withQueries
withQueries(queries)
"Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": } }"
fn spec.forProvider.jobs.pigJob.queryList.withQueriesMixin
withQueriesMixin(queries)
"Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": } }"
Note: This function appends passed data to existing values
obj spec.forProvider.jobs.prestoJob
"Optional. Job is a Presto job."
fn spec.forProvider.jobs.prestoJob.withClientTags
withClientTags(clientTags)
"Optional. Presto client tags to attach to this query"
fn spec.forProvider.jobs.prestoJob.withClientTagsMixin
withClientTagsMixin(clientTags)
"Optional. Presto client tags to attach to this query"
Note: This function appends passed data to existing values
fn spec.forProvider.jobs.prestoJob.withContinueOnFailure
withContinueOnFailure(continueOnFailure)
"Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries."
fn spec.forProvider.jobs.prestoJob.withLoggingConfig
withLoggingConfig(loggingConfig)
"Optional. The runtime log config for job execution."
fn spec.forProvider.jobs.prestoJob.withLoggingConfigMixin
withLoggingConfigMixin(loggingConfig)
"Optional. The runtime log config for job execution."
Note: This function appends passed data to existing values
fn spec.forProvider.jobs.prestoJob.withOutputFormat
withOutputFormat(outputFormat)
"Optional. The format in which query output will be displayed. See the Presto documentation for supported output formats"
fn spec.forProvider.jobs.prestoJob.withProperties
withProperties(properties)
"Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten."
fn spec.forProvider.jobs.prestoJob.withPropertiesMixin
withPropertiesMixin(properties)
"Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten."
Note: This function appends passed data to existing values
fn spec.forProvider.jobs.prestoJob.withQueryFileUri
withQueryFileUri(queryFileUri)
"The HCFS URI of the script that contains SQL queries."
fn spec.forProvider.jobs.prestoJob.withQueryList
withQueryList(queryList)
"A list of queries."
fn spec.forProvider.jobs.prestoJob.withQueryListMixin
withQueryListMixin(queryList)
"A list of queries."
Note: This function appends passed data to existing values
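A Presto step sketch (same assumption; tags, URI, and output format are placeholders):

```jsonnet
local jobs = workflowTemplate.spec.forProvider.jobs;

jobs.withStepId('presto-report')
+ jobs.prestoJob.withQueryFileUri('gs://my-bucket/report.sql')
+ jobs.prestoJob.withClientTags(['workflow', 'daily'])
+ jobs.prestoJob.withOutputFormat('CSV')
```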
obj spec.forProvider.jobs.prestoJob.loggingConfig
"Optional. The runtime log config for job execution."
fn spec.forProvider.jobs.prestoJob.loggingConfig.withDriverLogLevels
withDriverLogLevels(driverLogLevels)
"The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'"
fn spec.forProvider.jobs.prestoJob.loggingConfig.withDriverLogLevelsMixin
withDriverLogLevelsMixin(driverLogLevels)
"The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'"
Note: This function appends passed data to existing values
obj spec.forProvider.jobs.prestoJob.queryList
"A list of queries."
fn spec.forProvider.jobs.prestoJob.queryList.withQueries
withQueries(queries)
"Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": } }"
fn spec.forProvider.jobs.prestoJob.queryList.withQueriesMixin
withQueriesMixin(queries)
"Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": } }"
Note: This function appends passed data to existing values
obj spec.forProvider.jobs.pysparkJob
"Optional. Job is a PySpark job."
fn spec.forProvider.jobs.pysparkJob.withArchiveUris
withArchiveUris(archiveUris)
"Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip."
fn spec.forProvider.jobs.pysparkJob.withArchiveUrisMixin
withArchiveUrisMixin(archiveUris)
"Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip."
Note: This function appends passed data to existing values
fn spec.forProvider.jobs.pysparkJob.withArgs
withArgs(args)
"Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission."
fn spec.forProvider.jobs.pysparkJob.withArgsMixin
withArgsMixin(args)
"Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission."
Note: This function appends passed data to existing values
fn spec.forProvider.jobs.pysparkJob.withFileUris
withFileUris(fileUris)
"Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks."
fn spec.forProvider.jobs.pysparkJob.withFileUrisMixin
withFileUrisMixin(fileUris)
"Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks."
Note: This function appends passed data to existing values
fn spec.forProvider.jobs.pysparkJob.withJarFileUris
withJarFileUris(jarFileUris)
"Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH."
fn spec.forProvider.jobs.pysparkJob.withJarFileUrisMixin
withJarFileUrisMixin(jarFileUris)
"Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH."
Note: This function appends passed data to existing values
fn spec.forProvider.jobs.pysparkJob.withLoggingConfig
withLoggingConfig(loggingConfig)
"Optional. The runtime log config for job execution."
fn spec.forProvider.jobs.pysparkJob.withLoggingConfigMixin
withLoggingConfigMixin(loggingConfig)
"Optional. The runtime log config for job execution."
Note: This function appends passed data to existing values
fn spec.forProvider.jobs.pysparkJob.withMainPythonFileUri
withMainPythonFileUri(mainPythonFileUri)
"Required. The HCFS URI of the main Python file to use as the driver. Must be a .py file."
fn spec.forProvider.jobs.pysparkJob.withProperties
withProperties(properties)
"Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten."
fn spec.forProvider.jobs.pysparkJob.withPropertiesMixin
withPropertiesMixin(properties)
"Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten."
Note: This function appends passed data to existing values
fn spec.forProvider.jobs.pysparkJob.withPythonFileUris
withPythonFileUris(pythonFileUris)
"Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip."
fn spec.forProvider.jobs.pysparkJob.withPythonFileUrisMixin
withPythonFileUrisMixin(pythonFileUris)
"Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip."
Note: This function appends passed data to existing values
obj spec.forProvider.jobs.pysparkJob.loggingConfig
"Optional. The runtime log config for job execution."
fn spec.forProvider.jobs.pysparkJob.loggingConfig.withDriverLogLevels
withDriverLogLevels(driverLogLevels)
"The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'"
fn spec.forProvider.jobs.pysparkJob.loggingConfig.withDriverLogLevelsMixin
withDriverLogLevelsMixin(driverLogLevels)
"The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'"
Note: This function appends passed data to existing values
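A comparable sketch for a PySpark step, wiring the driver script, helper files, arguments, and driver log levels together (the import path and gs:// URIs are placeholders):

  local dataproc = import 'dataproc.libsonnet';  // hypothetical import path
  local jobs = dataproc.v1beta1.workflowTemplate.spec.forProvider.jobs;

  jobs.withStepId('pyspark-etl')
  + jobs.withPysparkJob(
      jobs.pysparkJob.withMainPythonFileUri('gs://my-bucket/jobs/etl.py')
      + jobs.pysparkJob.withPythonFileUris(['gs://my-bucket/jobs/helpers.zip'])
      + jobs.pysparkJob.withArgs(['--date', '2024-01-01'])
      + jobs.pysparkJob.withLoggingConfig(
          jobs.pysparkJob.loggingConfig.withDriverLogLevels({ root: 'INFO' })
        )
    )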
obj spec.forProvider.jobs.scheduling
"Optional. Job scheduling configuration."
fn spec.forProvider.jobs.scheduling.withMaxFailuresPerHour
withMaxFailuresPerHour(maxFailuresPerHour)
"Optional. Maximum number of times per hour a driver may be restarted as a result of driver exiting with non-zero code before job is reported failed. A job may be reported as thrashing if driver exits with non-zero code 4 times within 10 minute window. Maximum value is 10."
fn spec.forProvider.jobs.scheduling.withMaxFailuresTotal
withMaxFailuresTotal(maxFailuresTotal)
"Optional. Maximum number of times in total a driver may be restarted as a result of driver exiting with non-zero code before job is reported failed. Maximum value is 240"
obj spec.forProvider.jobs.sparkJob
"Optional. Job is a Spark job."
fn spec.forProvider.jobs.sparkJob.withArchiveUris
withArchiveUris(archiveUris)
"Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip."
fn spec.forProvider.jobs.sparkJob.withArchiveUrisMixin
withArchiveUrisMixin(archiveUris)
"Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip."
Note: This function appends passed data to existing values
fn spec.forProvider.jobs.sparkJob.withArgs
withArgs(args)
"Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission."
fn spec.forProvider.jobs.sparkJob.withArgsMixin
withArgsMixin(args)
"Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission."
Note: This function appends passed data to existing values
fn spec.forProvider.jobs.sparkJob.withFileUris
withFileUris(fileUris)
"Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks."
fn spec.forProvider.jobs.sparkJob.withFileUrisMixin
withFileUrisMixin(fileUris)
"Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks."
Note: This function appends passed data to existing values
fn spec.forProvider.jobs.sparkJob.withJarFileUris
withJarFileUris(jarFileUris)
"Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH."
fn spec.forProvider.jobs.sparkJob.withJarFileUrisMixin
withJarFileUrisMixin(jarFileUris)
"Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH."
Note: This function appends passed data to existing values
fn spec.forProvider.jobs.sparkJob.withLoggingConfig
withLoggingConfig(loggingConfig)
"Optional. The runtime log config for job execution."
fn spec.forProvider.jobs.sparkJob.withLoggingConfigMixin
withLoggingConfigMixin(loggingConfig)
"Optional. The runtime log config for job execution."
Note: This function appends passed data to existing values
fn spec.forProvider.jobs.sparkJob.withMainClass
withMainClass(mainClass)
"The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jar_file_uris."
fn spec.forProvider.jobs.sparkJob.withMainJarFileUri
withMainJarFileUri(mainJarFileUri)
"The HCFS URI of the jar file that contains the main class."
fn spec.forProvider.jobs.sparkJob.withProperties
withProperties(properties)
"Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten."
fn spec.forProvider.jobs.sparkJob.withPropertiesMixin
withPropertiesMixin(properties)
"Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten."
Note: This function appends passed data to existing values
obj spec.forProvider.jobs.sparkJob.loggingConfig
"Optional. The runtime log config for job execution."
fn spec.forProvider.jobs.sparkJob.loggingConfig.withDriverLogLevels
withDriverLogLevels(driverLogLevels)
"The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'"
fn spec.forProvider.jobs.sparkJob.loggingConfig.withDriverLogLevelsMixin
withDriverLogLevelsMixin(driverLogLevels)
"The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'"
Note: This function appends passed data to existing values
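A Spark step sketch that points at a main class in an extra jar and tunes one Spark property (class name, jar URI, and property value are placeholders):

  local dataproc = import 'dataproc.libsonnet';  // hypothetical import path
  local jobs = dataproc.v1beta1.workflowTemplate.spec.forProvider.jobs;

  jobs.withStepId('spark-aggregate')
  + jobs.withSparkJob(
      jobs.sparkJob.withMainClass('com.example.Aggregate')
      + jobs.sparkJob.withJarFileUris(['gs://my-bucket/jars/aggregate.jar'])
      + jobs.sparkJob.withProperties({ 'spark.executor.memory': '4g' })
    )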
obj spec.forProvider.jobs.sparkRJob
"Optional. Job is a SparkR job."
fn spec.forProvider.jobs.sparkRJob.withArchiveUris
withArchiveUris(archiveUris)
"Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip."
fn spec.forProvider.jobs.sparkRJob.withArchiveUrisMixin
withArchiveUrisMixin(archiveUris)
"Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip."
Note: This function appends passed data to existing values
fn spec.forProvider.jobs.sparkRJob.withArgs
withArgs(args)
"Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission."
fn spec.forProvider.jobs.sparkRJob.withArgsMixin
withArgsMixin(args)
"Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission."
Note: This function appends passed data to existing values
fn spec.forProvider.jobs.sparkRJob.withFileUris
withFileUris(fileUris)
"Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks."
fn spec.forProvider.jobs.sparkRJob.withFileUrisMixin
withFileUrisMixin(fileUris)
"Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks."
Note: This function appends passed data to existing values
fn spec.forProvider.jobs.sparkRJob.withLoggingConfig
withLoggingConfig(loggingConfig)
"Optional. The runtime log config for job execution."
fn spec.forProvider.jobs.sparkRJob.withLoggingConfigMixin
withLoggingConfigMixin(loggingConfig)
"Optional. The runtime log config for job execution."
Note: This function appends passed data to existing values
fn spec.forProvider.jobs.sparkRJob.withMainRFileUri
withMainRFileUri(mainRFileUri)
"Required. The HCFS URI of the main R file to use as the driver. Must be a .R file."
fn spec.forProvider.jobs.sparkRJob.withProperties
withProperties(properties)
"Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten."
fn spec.forProvider.jobs.sparkRJob.withPropertiesMixin
withPropertiesMixin(properties)
"Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten."
Note: This function appends passed data to existing values
obj spec.forProvider.jobs.sparkRJob.loggingConfig
"Optional. The runtime log config for job execution."
fn spec.forProvider.jobs.sparkRJob.loggingConfig.withDriverLogLevels
withDriverLogLevels(driverLogLevels)
"The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'"
fn spec.forProvider.jobs.sparkRJob.loggingConfig.withDriverLogLevelsMixin
withDriverLogLevelsMixin(driverLogLevels)
"The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'"
Note: This function appends passed data to existing values
obj spec.forProvider.jobs.sparkSqlJob
"Optional. Job is a SparkSql job."
fn spec.forProvider.jobs.sparkSqlJob.withJarFileUris
withJarFileUris(jarFileUris)
"Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH."
fn spec.forProvider.jobs.sparkSqlJob.withJarFileUrisMixin
withJarFileUrisMixin(jarFileUris)
"Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH."
Note: This function appends passed data to existing values
fn spec.forProvider.jobs.sparkSqlJob.withLoggingConfig
withLoggingConfig(loggingConfig)
"Optional. The runtime log config for job execution."
fn spec.forProvider.jobs.sparkSqlJob.withLoggingConfigMixin
withLoggingConfigMixin(loggingConfig)
"Optional. The runtime log config for job execution."
Note: This function appends passed data to existing values
fn spec.forProvider.jobs.sparkSqlJob.withProperties
withProperties(properties)
"Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten."
fn spec.forProvider.jobs.sparkSqlJob.withPropertiesMixin
withPropertiesMixin(properties)
"Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten."
Note: This function appends passed data to existing values
fn spec.forProvider.jobs.sparkSqlJob.withQueryFileUri
withQueryFileUri(queryFileUri)
"The HCFS URI of the script that contains SQL queries."
fn spec.forProvider.jobs.sparkSqlJob.withQueryList
withQueryList(queryList)
"A list of queries."
fn spec.forProvider.jobs.sparkSqlJob.withQueryListMixin
withQueryListMixin(queryList)
"A list of queries."
Note: This function appends passed data to existing values
fn spec.forProvider.jobs.sparkSqlJob.withScriptVariables
withScriptVariables(scriptVariables)
"Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET name=\"value\";)."
fn spec.forProvider.jobs.sparkSqlJob.withScriptVariablesMixin
withScriptVariablesMixin(scriptVariables)
"Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET name=\"value\";)."
Note: This function appends passed data to existing values
obj spec.forProvider.jobs.sparkSqlJob.loggingConfig
"Optional. The runtime log config for job execution."
fn spec.forProvider.jobs.sparkSqlJob.loggingConfig.withDriverLogLevels
withDriverLogLevels(driverLogLevels)
"The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'"
fn spec.forProvider.jobs.sparkSqlJob.loggingConfig.withDriverLogLevelsMixin
withDriverLogLevelsMixin(driverLogLevels)
"The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'"
Note: This function appends passed data to existing values
obj spec.forProvider.jobs.sparkSqlJob.queryList
"A list of queries."
fn spec.forProvider.jobs.sparkSqlJob.queryList.withQueries
withQueries(queries)
"Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": } }"
fn spec.forProvider.jobs.sparkSqlJob.queryList.withQueriesMixin
withQueriesMixin(queries)
"Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": } }"
Note: This function appends passed data to existing values
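A Spark SQL step sketch combining an inline query list with a script variable that the query references (names and values are illustrative):

  local dataproc = import 'dataproc.libsonnet';  // hypothetical import path
  local jobs = dataproc.v1beta1.workflowTemplate.spec.forProvider.jobs;

  jobs.withStepId('spark-sql-report')
  + jobs.withSparkSqlJob(
      jobs.sparkSqlJob.withQueryList(
        jobs.sparkSqlJob.queryList.withQueries(['SELECT * FROM events WHERE day = "${day}"'])
      )
      + jobs.sparkSqlJob.withScriptVariables({ day: '2024-01-01' })
    )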
obj spec.forProvider.parameters
"Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated."
fn spec.forProvider.parameters.withDescription
withDescription(description)
"Optional. Brief description of the parameter. Must not exceed 1024 characters."
fn spec.forProvider.parameters.withFields
withFields(fields)
"Required. Paths to all fields that the parameter replaces. A field is allowed to appear in at most one parameter's list of field paths. A field path is similar in syntax to a .sparkJob.args"
fn spec.forProvider.parameters.withFieldsMixin
withFieldsMixin(fields)
"Required. Paths to all fields that the parameter replaces. A field is allowed to appear in at most one parameter's list of field paths. A field path is similar in syntax to a .sparkJob.args"
Note: This function appends passed data to existing values
fn spec.forProvider.parameters.withName
withName(name)
"Required. Parameter name. The parameter name is used as the key, and paired with the parameter value, which are passed to the template when the template is instantiated. The name must contain only capital letters (A-Z), numbers (0-9), and underscores (_), and must not start with a number. The maximum length is 40 characters."
fn spec.forProvider.parameters.withValidation
withValidation(validation)
"Optional. Validation rules to be applied to this parameter's value."
fn spec.forProvider.parameters.withValidationMixin
withValidationMixin(validation)
"Optional. Validation rules to be applied to this parameter's value."
Note: This function appends passed data to existing values
obj spec.forProvider.parameters.validation
"Optional. Validation rules to be applied to this parameter's value."
fn spec.forProvider.parameters.validation.withRegex
withRegex(regex)
"Validation based on regular expressions."
fn spec.forProvider.parameters.validation.withRegexMixin
withRegexMixin(regex)
"Validation based on regular expressions."
Note: This function appends passed data to existing values
fn spec.forProvider.parameters.validation.withValues
withValues(values)
"Required. List of allowed values for the parameter."
fn spec.forProvider.parameters.validation.withValuesMixin
withValuesMixin(values)
"Required. List of allowed values for the parameter."
Note: This function appends passed data to existing values
obj spec.forProvider.parameters.validation.regex
"Validation based on regular expressions."
fn spec.forProvider.parameters.validation.regex.withRegexes
withRegexes(regexes)
"Required. RE2 regular expressions used to validate the parameter's value. The value must match the regex in its entirety (substring matches are not sufficient)."
fn spec.forProvider.parameters.validation.regex.withRegexesMixin
withRegexesMixin(regexes)
"Required. RE2 regular expressions used to validate the parameter's value. The value must match the regex in its entirety (substring matches are not sufficient)."
Note: This function appends passed data to existing values
obj spec.forProvider.parameters.validation.values
"Required. List of allowed values for the parameter."
fn spec.forProvider.parameters.validation.values.withValues
withValues(values)
"Required. List of allowed values for the parameter."
fn spec.forProvider.parameters.validation.values.withValuesMixin
withValuesMixin(values)
"Required. List of allowed values for the parameter."
Note: This function appends passed data to existing values
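A template parameter sketch that exposes one job field for substitution and validates it with an RE2 pattern. The field path and names are illustrative assumptions; the path syntax follows the Dataproc workflow parameterization docs rather than anything defined by this library:

  local dataproc = import 'dataproc.libsonnet';  // hypothetical import path
  local params = dataproc.v1beta1.workflowTemplate.spec.forProvider.parameters;

  // One element for spec.forProvider.withParameters([...]).
  params.withName('RUN_DATE')
  + params.withDescription('Partition date to process')
  + params.withFields(["jobs['spark-sql-report'].sparkSqlJob.scriptVariables['day']"])
  + params.withValidation(
      params.validation.withRegex(
        params.validation.regex.withRegexes(['\\d{4}-\\d{2}-\\d{2}'])
      )
    )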
obj spec.forProvider.placement
"Required. WorkflowTemplate scheduling information."
fn spec.forProvider.placement.withClusterSelector
withClusterSelector(clusterSelector)
"Optional. A selector that chooses target cluster for jobs based on metadata. The selector is evaluated at the time each job is submitted."
fn spec.forProvider.placement.withClusterSelectorMixin
withClusterSelectorMixin(clusterSelector)
"Optional. A selector that chooses target cluster for jobs based on metadata. The selector is evaluated at the time each job is submitted."
Note: This function appends passed data to existing values
fn spec.forProvider.placement.withManagedCluster
withManagedCluster(managedCluster)
"A cluster that is managed by the workflow."
fn spec.forProvider.placement.withManagedClusterMixin
withManagedClusterMixin(managedCluster)
"A cluster that is managed by the workflow."
Note: This function appends passed data to existing values
obj spec.forProvider.placement.clusterSelector
"Optional. A selector that chooses target cluster for jobs based on metadata. The selector is evaluated at the time each job is submitted."
fn spec.forProvider.placement.clusterSelector.withClusterLabels
withClusterLabels(clusterLabels)
"Required. The cluster labels. Cluster must have all labels to match."
fn spec.forProvider.placement.clusterSelector.withClusterLabelsMixin
withClusterLabelsMixin(clusterLabels)
"Required. The cluster labels. Cluster must have all labels to match."
Note: This function appends passed data to existing values
fn spec.forProvider.placement.clusterSelector.withZone
withZone(zone)
"Optional. The zone where the Compute Engine cluster will be located. On a create request, it is required in the \"global\" region. If omitted in a non-global Dataproc region, the service will pick a zone in the corresponding Compute Engine region. On a get request, zone will always be present. A full URL, partial URI, or short name are valid. Examples: * https://www.googleapis.com/compute/v1/projects/ * us-central1-f"
obj spec.forProvider.placement.managedCluster
"A cluster that is managed by the workflow."
fn spec.forProvider.placement.managedCluster.withClusterName
withClusterName(clusterName)
"Required. The cluster name prefix. A unique cluster name will be formed by appending a random suffix. The name must contain only lower-case letters (a-z), numbers (0-9), and hyphens (-). Must begin with a letter. Cannot begin or end with hyphen. Must consist of between 2 and 35 characters."
fn spec.forProvider.placement.managedCluster.withConfig
withConfig(config)
"Required. The cluster configuration."
fn spec.forProvider.placement.managedCluster.withConfigMixin
withConfigMixin(config)
"Required. The cluster configuration."
Note: This function appends passed data to existing values
fn spec.forProvider.placement.managedCluster.withLabels
withLabels(labels)
"Optional. The labels to associate with this cluster. Label keys must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: {0,63} No more than 32 labels can be associated with a given cluster."
fn spec.forProvider.placement.managedCluster.withLabelsMixin
withLabelsMixin(labels)
"Optional. The labels to associate with this cluster. Label keys must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: {0,63} No more than 32 labels can be associated with a given cluster."
Note: This function appends passed data to existing values
obj spec.forProvider.placement.managedCluster.config
"Required. The cluster configuration."
fn spec.forProvider.placement.managedCluster.config.withAutoscalingConfig
withAutoscalingConfig(autoscalingConfig)
"Optional. Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset."
fn spec.forProvider.placement.managedCluster.config.withAutoscalingConfigMixin
withAutoscalingConfigMixin(autoscalingConfig)
"Optional. Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset."
Note: This function appends passed data to existing values
fn spec.forProvider.placement.managedCluster.config.withEncryptionConfig
withEncryptionConfig(encryptionConfig)
"Optional. Encryption settings for the cluster."
fn spec.forProvider.placement.managedCluster.config.withEncryptionConfigMixin
withEncryptionConfigMixin(encryptionConfig)
"Optional. Encryption settings for the cluster."
Note: This function appends passed data to existing values
fn spec.forProvider.placement.managedCluster.config.withEndpointConfig
withEndpointConfig(endpointConfig)
"Optional. Port/endpoint configuration for this cluster"
fn spec.forProvider.placement.managedCluster.config.withEndpointConfigMixin
withEndpointConfigMixin(endpointConfig)
"Optional. Port/endpoint configuration for this cluster"
Note: This function appends passed data to existing values
fn spec.forProvider.placement.managedCluster.config.withGceClusterConfig
withGceClusterConfig(gceClusterConfig)
"Optional. The shared Compute Engine config settings for all instances in a cluster."
fn spec.forProvider.placement.managedCluster.config.withGceClusterConfigMixin
withGceClusterConfigMixin(gceClusterConfig)
"Optional. The shared Compute Engine config settings for all instances in a cluster."
Note: This function appends passed data to existing values
fn spec.forProvider.placement.managedCluster.config.withInitializationActions
withInitializationActions(initializationActions)
"Optional. Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's role metadata to run an executable on a master or worker node, as shown below using curl (you can also use wget): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if ; then ... master specific actions ... else ... worker specific actions ... fi"
fn spec.forProvider.placement.managedCluster.config.withInitializationActionsMixin
withInitializationActionsMixin(initializationActions)
"Optional. Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's role metadata to run an executable on a master or worker node, as shown below using curl (you can also use wget): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if ; then ... master specific actions ... else ... worker specific actions ... fi"
Note: This function appends passed data to existing values
fn spec.forProvider.placement.managedCluster.config.withLifecycleConfig
withLifecycleConfig(lifecycleConfig)
"Optional. Lifecycle setting for the cluster."
fn spec.forProvider.placement.managedCluster.config.withLifecycleConfigMixin
withLifecycleConfigMixin(lifecycleConfig)
"Optional. Lifecycle setting for the cluster."
Note: This function appends passed data to existing values
fn spec.forProvider.placement.managedCluster.config.withMasterConfig
withMasterConfig(masterConfig)
"Optional. The Compute Engine config settings for additional worker instances in a cluster."
fn spec.forProvider.placement.managedCluster.config.withMasterConfigMixin
withMasterConfigMixin(masterConfig)
"Optional. The Compute Engine config settings for additional worker instances in a cluster."
Note: This function appends passed data to existing values
fn spec.forProvider.placement.managedCluster.config.withSecondaryWorkerConfig
withSecondaryWorkerConfig(secondaryWorkerConfig)
"Optional. The Compute Engine config settings for additional worker instances in a cluster."
fn spec.forProvider.placement.managedCluster.config.withSecondaryWorkerConfigMixin
withSecondaryWorkerConfigMixin(secondaryWorkerConfig)
"Optional. The Compute Engine config settings for additional worker instances in a cluster."
Note: This function appends passed data to existing values
fn spec.forProvider.placement.managedCluster.config.withSecurityConfig
withSecurityConfig(securityConfig)
"Optional. Security settings for the cluster."
fn spec.forProvider.placement.managedCluster.config.withSecurityConfigMixin
withSecurityConfigMixin(securityConfig)
"Optional. Security settings for the cluster."
Note: This function appends passed data to existing values
fn spec.forProvider.placement.managedCluster.config.withSoftwareConfig
withSoftwareConfig(softwareConfig)
"Optional. The config settings for software inside the cluster."
fn spec.forProvider.placement.managedCluster.config.withSoftwareConfigMixin
withSoftwareConfigMixin(softwareConfig)
"Optional. The config settings for software inside the cluster."
Note: This function appends passed data to existing values
fn spec.forProvider.placement.managedCluster.config.withStagingBucket
withStagingBucket(stagingBucket)
"Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket))."
fn spec.forProvider.placement.managedCluster.config.withTempBucket
withTempBucket(tempBucket)
"Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket."
fn spec.forProvider.placement.managedCluster.config.withWorkerConfig
withWorkerConfig(workerConfig)
"Optional. The Compute Engine config settings for additional worker instances in a cluster."
fn spec.forProvider.placement.managedCluster.config.withWorkerConfigMixin
withWorkerConfigMixin(workerConfig)
"Optional. The Compute Engine config settings for additional worker instances in a cluster."
Note: This function appends passed data to existing values
obj spec.forProvider.placement.managedCluster.config.autoscalingConfig
"Optional. Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset."
fn spec.forProvider.placement.managedCluster.config.autoscalingConfig.withPolicy
withPolicy(policy)
"Optional. The autoscaling policy used by the cluster. Only resource names including projectid and location (region) are valid. Examples: * https://www.googleapis.com/compute/v1/projects/ Note that the policy must be in the same project and Dataproc region."
obj spec.forProvider.placement.managedCluster.config.encryptionConfig
"Optional. Encryption settings for the cluster."
fn spec.forProvider.placement.managedCluster.config.encryptionConfig.withGcePdKmsKeyName
withGcePdKmsKeyName(gcePdKmsKeyName)
"Optional. The Cloud KMS key name to use for PD disk encryption for all instances in the cluster."
obj spec.forProvider.placement.managedCluster.config.endpointConfig
"Optional. Port/endpoint configuration for this cluster"
fn spec.forProvider.placement.managedCluster.config.endpointConfig.withEnableHttpPortAccess
withEnableHttpPortAccess(enableHttpPortAccess)
"Optional. If true, enable http access to specific ports on the cluster from external sources. Defaults to false."
obj spec.forProvider.placement.managedCluster.config.gceClusterConfig
"Optional. The shared Compute Engine config settings for all instances in a cluster."
fn spec.forProvider.placement.managedCluster.config.gceClusterConfig.withInternalIpOnly
withInternalIpOnly(internalIpOnly)
"Optional. If true, all instances in the cluster will only have internal IP addresses. By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. This internal_ip_only restriction can only be enabled for subnetwork enabled networks, and all off-cluster dependencies must be configured to be accessible without external IP addresses."
fn spec.forProvider.placement.managedCluster.config.gceClusterConfig.withMetadata
withMetadata(metadata)
"The Compute Engine metadata entries to add to all instances (see (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata))."
fn spec.forProvider.placement.managedCluster.config.gceClusterConfig.withMetadataMixin
withMetadataMixin(metadata)
"The Compute Engine metadata entries to add to all instances (see (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata))."
Note: This function appends passed data to existing values
fn spec.forProvider.placement.managedCluster.config.gceClusterConfig.withNetwork
withNetwork(network)
"Optional. The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither network_uri nor subnetwork_uri is specified, the \"default\" network of the project is used, if it exists. Cannot be a \"Custom Subnet Network\" (see /regions/global/default*default`"
fn spec.forProvider.placement.managedCluster.config.gceClusterConfig.withNodeGroupAffinity
withNodeGroupAffinity(nodeGroupAffinity)
"Optional. Node Group Affinity for sole-tenant clusters."
fn spec.forProvider.placement.managedCluster.config.gceClusterConfig.withNodeGroupAffinityMixin
withNodeGroupAffinityMixin(nodeGroupAffinity)
"Optional. Node Group Affinity for sole-tenant clusters."
Note: This function appends passed data to existing values
fn spec.forProvider.placement.managedCluster.config.gceClusterConfig.withPrivateIpv6GoogleAccess
withPrivateIpv6GoogleAccess(privateIpv6GoogleAccess)
"Optional. The type of IPv6 access for a cluster. Possible values: PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED, INHERIT_FROM_SUBNETWORK, OUTBOUND, BIDIRECTIONAL"
fn spec.forProvider.placement.managedCluster.config.gceClusterConfig.withReservationAffinity
withReservationAffinity(reservationAffinity)
"Optional. Reservation Affinity for consuming Zonal reservation."
fn spec.forProvider.placement.managedCluster.config.gceClusterConfig.withReservationAffinityMixin
withReservationAffinityMixin(reservationAffinity)
"Optional. Reservation Affinity for consuming Zonal reservation."
Note: This function appends passed data to existing values
fn spec.forProvider.placement.managedCluster.config.gceClusterConfig.withServiceAccount
withServiceAccount(serviceAccount)
"Optional. The (https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) is used."
fn spec.forProvider.placement.managedCluster.config.gceClusterConfig.withServiceAccountScopes
withServiceAccountScopes(serviceAccountScopes)
"Optional. The URIs of service account scopes to be included in Compute Engine instances. The following base set of scopes is always included: * https://www.googleapis.com/auth/cloud.useraccounts.readonly * https://www.googleapis.com/auth/devstorage.read_write * https://www.googleapis.com/auth/logging.write If no scopes are specified, the following defaults are also provided: * https://www.googleapis.com/auth/bigquery * https://www.googleapis.com/auth/bigtable.admin.table * https://www.googleapis.com/auth/bigtable.data * https://www.googleapis.com/auth/devstorage.full_control"
fn spec.forProvider.placement.managedCluster.config.gceClusterConfig.withServiceAccountScopesMixin
withServiceAccountScopesMixin(serviceAccountScopes)
"Optional. The URIs of service account scopes to be included in Compute Engine instances. The following base set of scopes is always included: * https://www.googleapis.com/auth/cloud.useraccounts.readonly * https://www.googleapis.com/auth/devstorage.read_write * https://www.googleapis.com/auth/logging.write If no scopes are specified, the following defaults are also provided: * https://www.googleapis.com/auth/bigquery * https://www.googleapis.com/auth/bigtable.admin.table * https://www.googleapis.com/auth/bigtable.data * https://www.googleapis.com/auth/devstorage.full_control"
Note: This function appends passed data to existing values
fn spec.forProvider.placement.managedCluster.config.gceClusterConfig.withShieldedInstanceConfig
withShieldedInstanceConfig(shieldedInstanceConfig)
"Optional. Shielded Instance Config for clusters using Compute Engine Shielded VMs. Structure defined below."
fn spec.forProvider.placement.managedCluster.config.gceClusterConfig.withShieldedInstanceConfigMixin
withShieldedInstanceConfigMixin(shieldedInstanceConfig)
"Optional. Shielded Instance Config for clusters using Compute Engine Shielded VMs. Structure defined below."
Note: This function appends passed data to existing values
fn spec.forProvider.placement.managedCluster.config.gceClusterConfig.withSubnetwork
withSubnetwork(subnetwork)
"Optional. The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri. A full URL, partial URI, or short name are valid. Examples: * https://www.googleapis.com/compute/v1/projects//regions/us-east1/subnetworks/sub0 * sub0"
fn spec.forProvider.placement.managedCluster.config.gceClusterConfig.withTags
withTags(tags)
"The Compute Engine tags to add to all instances (see (https://cloud.google.com/compute/docs/label-or-tag-resources#tags))."
fn spec.forProvider.placement.managedCluster.config.gceClusterConfig.withTagsMixin
withTagsMixin(tags)
"The Compute Engine tags to add to all instances (see (https://cloud.google.com/compute/docs/label-or-tag-resources#tags))."
Note: This function appends passed data to existing values
fn spec.forProvider.placement.managedCluster.config.gceClusterConfig.withZone
withZone(zone)
"Optional. The zone where the Compute Engine cluster will be located. On a create request, it is required in the \"global\" region. If omitted in a non-global Dataproc region, the service will pick a zone in the corresponding Compute Engine region. On a get request, zone will always be present. A full URL, partial URI, or short name are valid. Examples: * https://www.googleapis.com/compute/v1/projects/ * us-central1-f"
obj spec.forProvider.placement.managedCluster.config.gceClusterConfig.nodeGroupAffinity
"Optional. Node Group Affinity for sole-tenant clusters."
fn spec.forProvider.placement.managedCluster.config.gceClusterConfig.nodeGroupAffinity.withNodeGroup
withNodeGroup(nodeGroup)
"Required. The URI of a sole-tenant /zones/us-central1-a/nodeGroups/node-group-1*node-group-1`"
obj spec.forProvider.placement.managedCluster.config.gceClusterConfig.reservationAffinity
"Optional. Reservation Affinity for consuming Zonal reservation."
fn spec.forProvider.placement.managedCluster.config.gceClusterConfig.reservationAffinity.withConsumeReservationType
withConsumeReservationType(consumeReservationType)
"Optional. Type of reservation to consume Possible values: TYPE_UNSPECIFIED, NO_RESERVATION, ANY_RESERVATION, SPECIFIC_RESERVATION"
fn spec.forProvider.placement.managedCluster.config.gceClusterConfig.reservationAffinity.withKey
withKey(key)
"Optional. Corresponds to the label key of reservation resource."
fn spec.forProvider.placement.managedCluster.config.gceClusterConfig.reservationAffinity.withValues
withValues(values)
"Required. List of allowed values for the parameter."
fn spec.forProvider.placement.managedCluster.config.gceClusterConfig.reservationAffinity.withValuesMixin
withValuesMixin(values)
"Required. List of allowed values for the parameter."
Note: This function appends passed data to existing values
obj spec.forProvider.placement.managedCluster.config.gceClusterConfig.shieldedInstanceConfig
"Optional. Shielded Instance Config for clusters using Compute Engine Shielded VMs. Structure defined below."
fn spec.forProvider.placement.managedCluster.config.gceClusterConfig.shieldedInstanceConfig.withEnableIntegrityMonitoring
withEnableIntegrityMonitoring(enableIntegrityMonitoring)
"Optional. Defines whether instances have Integrity Monitoring enabled."
fn spec.forProvider.placement.managedCluster.config.gceClusterConfig.shieldedInstanceConfig.withEnableSecureBoot
withEnableSecureBoot(enableSecureBoot)
"Optional. Defines whether instances have Secure Boot enabled."
fn spec.forProvider.placement.managedCluster.config.gceClusterConfig.shieldedInstanceConfig.withEnableVtpm
withEnableVtpm(enableVtpm)
"Optional. Defines whether instances have the vTPM enabled."
obj spec.forProvider.placement.managedCluster.config.initializationActions
"Optional. Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's role metadata to run an executable on a master or worker node, as shown below using curl (you can also use wget): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if ; then ... master specific actions ... else ... worker specific actions ... fi"
fn spec.forProvider.placement.managedCluster.config.initializationActions.withExecutableFile
withExecutableFile(executableFile)
"Required. Cloud Storage URI of executable file."
fn spec.forProvider.placement.managedCluster.config.initializationActions.withExecutionTimeout
withExecutionTimeout(executionTimeout)
"Optional. Amount of time executable has to complete. Default is 10 minutes (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period."
obj spec.forProvider.placement.managedCluster.config.lifecycleConfig
"Optional. Lifecycle setting for the cluster."
fn spec.forProvider.placement.managedCluster.config.lifecycleConfig.withAutoDeleteTime
withAutoDeleteTime(autoDeleteTime)
"Optional. The time when cluster will be auto-deleted (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json))."
fn spec.forProvider.placement.managedCluster.config.lifecycleConfig.withAutoDeleteTtl
withAutoDeleteTtl(autoDeleteTtl)
"Optional. The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json))."
fn spec.forProvider.placement.managedCluster.config.lifecycleConfig.withIdleDeleteTtl
withIdleDeleteTtl(idleDeleteTtl)
"Optional. The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)."
obj spec.forProvider.placement.managedCluster.config.masterConfig
"Optional. The Compute Engine config settings for additional worker instances in a cluster."
fn spec.forProvider.placement.managedCluster.config.masterConfig.withAccelerators
withAccelerators(accelerators)
"Optional. The Compute Engine accelerator configuration for these instances."
fn spec.forProvider.placement.managedCluster.config.masterConfig.withAcceleratorsMixin
withAcceleratorsMixin(accelerators)
"Optional. The Compute Engine accelerator configuration for these instances."
Note: This function appends passed data to existing values
fn spec.forProvider.placement.managedCluster.config.masterConfig.withDiskConfig
withDiskConfig(diskConfig)
"Optional. Disk option config settings."
fn spec.forProvider.placement.managedCluster.config.masterConfig.withDiskConfigMixin
withDiskConfigMixin(diskConfig)
"Optional. Disk option config settings."
Note: This function appends passed data to existing values
fn spec.forProvider.placement.managedCluster.config.masterConfig.withImage
withImage(image)
"Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * https://www.googleapis.com/compute/beta/projects/ If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default."
fn spec.forProvider.placement.managedCluster.config.masterConfig.withMachineType
withMachineType(machineType)
"Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * https://www.googleapis.com/compute/v1/projects/(https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, n1-standard-2`."
fn spec.forProvider.placement.managedCluster.config.masterConfig.withMinCpuPlatform
withMinCpuPlatform(minCpuPlatform)
"Optional. Specifies the minimum cpu platform for the Instance Group. See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu)."
fn spec.forProvider.placement.managedCluster.config.masterConfig.withNumInstances
withNumInstances(numInstances)
"Optional. The number of VM instances in the instance group. For master instance groups, must be set to 1."
fn spec.forProvider.placement.managedCluster.config.masterConfig.withPreemptibility
withPreemptibility(preemptibility)
"Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is NON_PREEMPTIBLE. This default cannot be changed. The default value for secondary instances is PREEMPTIBLE. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE"
obj spec.forProvider.placement.managedCluster.config.masterConfig.accelerators
"Optional. The Compute Engine accelerator configuration for these instances."
fn spec.forProvider.placement.managedCluster.config.masterConfig.accelerators.withAcceleratorCount
withAcceleratorCount(acceleratorCount)
"The number of the accelerator cards of this type exposed to this instance."
fn spec.forProvider.placement.managedCluster.config.masterConfig.accelerators.withAcceleratorType
withAcceleratorType(acceleratorType)
"Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-k80."
obj spec.forProvider.placement.managedCluster.config.masterConfig.diskConfig
"Optional. Disk option config settings."
fn spec.forProvider.placement.managedCluster.config.masterConfig.diskConfig.withBootDiskSizeGb
withBootDiskSizeGb(bootDiskSizeGb)
"Optional. Size in GB of the boot disk (default is 500GB)."
fn spec.forProvider.placement.managedCluster.config.masterConfig.diskConfig.withBootDiskType
withBootDiskType(bootDiskType)
"Optional. Type of the boot disk (default is \"pd-standard\"). Valid values: \"pd-ssd\" (Persistent Disk Solid State Drive) or \"pd-standard\" (Persistent Disk Hard Disk Drive)."
fn spec.forProvider.placement.managedCluster.config.masterConfig.diskConfig.withNumLocalSsds
withNumLocalSsds(numLocalSsds)
"Optional. Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries."
obj spec.forProvider.placement.managedCluster.config.secondaryWorkerConfig
"Optional. The Compute Engine config settings for additional worker instances in a cluster."
fn spec.forProvider.placement.managedCluster.config.secondaryWorkerConfig.withAccelerators
withAccelerators(accelerators)
"Optional. The Compute Engine accelerator configuration for these instances."
fn spec.forProvider.placement.managedCluster.config.secondaryWorkerConfig.withAcceleratorsMixin
withAcceleratorsMixin(accelerators)
"Optional. The Compute Engine accelerator configuration for these instances."
Note: This function appends passed data to existing values
fn spec.forProvider.placement.managedCluster.config.secondaryWorkerConfig.withDiskConfig
withDiskConfig(diskConfig)
"Optional. Disk option config settings."
fn spec.forProvider.placement.managedCluster.config.secondaryWorkerConfig.withDiskConfigMixin
withDiskConfigMixin(diskConfig)
"Optional. Disk option config settings."
Note: This function appends passed data to existing values
fn spec.forProvider.placement.managedCluster.config.secondaryWorkerConfig.withImage
withImage(image)
"Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * https://www.googleapis.com/compute/beta/projects/ If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default."
fn spec.forProvider.placement.managedCluster.config.secondaryWorkerConfig.withMachineType
withMachineType(machineType)
"Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * https://www.googleapis.com/compute/v1/projects/(https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, n1-standard-2`."
fn spec.forProvider.placement.managedCluster.config.secondaryWorkerConfig.withMinCpuPlatform
withMinCpuPlatform(minCpuPlatform)
"Optional. Specifies the minimum cpu platform for the Instance Group. See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu)."
fn spec.forProvider.placement.managedCluster.config.secondaryWorkerConfig.withNumInstances
withNumInstances(numInstances)
"Optional. The number of VM instances in the instance group. For master instance groups, must be set to 1."
fn spec.forProvider.placement.managedCluster.config.secondaryWorkerConfig.withPreemptibility
withPreemptibility(preemptibility)
"Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is NON_PREEMPTIBLE. This default cannot be changed. The default value for secondary instances is PREEMPTIBLE. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE"
obj spec.forProvider.placement.managedCluster.config.secondaryWorkerConfig.accelerators
"Optional. The Compute Engine accelerator configuration for these instances."
fn spec.forProvider.placement.managedCluster.config.secondaryWorkerConfig.accelerators.withAcceleratorCount
withAcceleratorCount(acceleratorCount)
"The number of the accelerator cards of this type exposed to this instance."
fn spec.forProvider.placement.managedCluster.config.secondaryWorkerConfig.accelerators.withAcceleratorType
withAcceleratorType(acceleratorType)
"Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-k80."
obj spec.forProvider.placement.managedCluster.config.secondaryWorkerConfig.diskConfig
"Optional. Disk option config settings."
fn spec.forProvider.placement.managedCluster.config.secondaryWorkerConfig.diskConfig.withBootDiskSizeGb
withBootDiskSizeGb(bootDiskSizeGb)
"Optional. Size in GB of the boot disk (default is 500GB)."
fn spec.forProvider.placement.managedCluster.config.secondaryWorkerConfig.diskConfig.withBootDiskType
withBootDiskType(bootDiskType)
"Optional. Type of the boot disk (default is \"pd-standard\"). Valid values: \"pd-ssd\" (Persistent Disk Solid State Drive) or \"pd-standard\" (Persistent Disk Hard Disk Drive)."
fn spec.forProvider.placement.managedCluster.config.secondaryWorkerConfig.diskConfig.withNumLocalSsds
withNumLocalSsds(numLocalSsds)
"Optional. Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries."
obj spec.forProvider.placement.managedCluster.config.securityConfig
"Optional. Security settings for the cluster."
fn spec.forProvider.placement.managedCluster.config.securityConfig.withKerberosConfig
withKerberosConfig(kerberosConfig)
"Kerberos related configuration."
fn spec.forProvider.placement.managedCluster.config.securityConfig.withKerberosConfigMixin
withKerberosConfigMixin(kerberosConfig)
"Kerberos related configuration."
Note: This function appends passed data to existing values
obj spec.forProvider.placement.managedCluster.config.securityConfig.kerberosConfig
"Kerberos related configuration."
fn spec.forProvider.placement.managedCluster.config.securityConfig.kerberosConfig.withCrossRealmTrustAdminServer
withCrossRealmTrustAdminServer(crossRealmTrustAdminServer)
"Optional. The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship."
fn spec.forProvider.placement.managedCluster.config.securityConfig.kerberosConfig.withCrossRealmTrustKdc
withCrossRealmTrustKdc(crossRealmTrustKdc)
"Optional. The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship."
fn spec.forProvider.placement.managedCluster.config.securityConfig.kerberosConfig.withCrossRealmTrustRealm
withCrossRealmTrustRealm(crossRealmTrustRealm)
"Optional. The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust."
fn spec.forProvider.placement.managedCluster.config.securityConfig.kerberosConfig.withCrossRealmTrustSharedPassword
withCrossRealmTrustSharedPassword(crossRealmTrustSharedPassword)
"Optional. The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship."
fn spec.forProvider.placement.managedCluster.config.securityConfig.kerberosConfig.withEnableKerberos
withEnableKerberos(enableKerberos)
"Optional. Flag to indicate whether to Kerberize the cluster (default: false). Set this field to true to enable Kerberos on a cluster."
fn spec.forProvider.placement.managedCluster.config.securityConfig.kerberosConfig.withKdcDbKey
withKdcDbKey(kdcDbKey)
"Optional. The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database."
fn spec.forProvider.placement.managedCluster.config.securityConfig.kerberosConfig.withKeyPassword
withKeyPassword(keyPassword)
"Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc."
fn spec.forProvider.placement.managedCluster.config.securityConfig.kerberosConfig.withKeystore
withKeystore(keystore)
"Optional. The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate."
fn spec.forProvider.placement.managedCluster.config.securityConfig.kerberosConfig.withKeystorePassword
withKeystorePassword(keystorePassword)
"Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, this password is generated by Dataproc."
fn spec.forProvider.placement.managedCluster.config.securityConfig.kerberosConfig.withKmsKey
withKmsKey(kmsKey)
"Optional. The uri of the KMS key used to encrypt various sensitive files."
fn spec.forProvider.placement.managedCluster.config.securityConfig.kerberosConfig.withRealm
withRealm(realm)
"Optional. The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm."
fn spec.forProvider.placement.managedCluster.config.securityConfig.kerberosConfig.withRootPrincipalPassword
withRootPrincipalPassword(rootPrincipalPassword)
"Optional. The Cloud Storage URI of a KMS encrypted file containing the root principal password."
fn spec.forProvider.placement.managedCluster.config.securityConfig.kerberosConfig.withTgtLifetimeHours
withTgtLifetimeHours(tgtLifetimeHours)
"Optional. The lifetime of the ticket granting ticket, in hours. If not specified, or user specifies 0, then default value 10 will be used."
fn spec.forProvider.placement.managedCluster.config.securityConfig.kerberosConfig.withTruststore
withTruststore(truststore)
"Optional. The Cloud Storage URI of the truststore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate."
fn spec.forProvider.placement.managedCluster.config.securityConfig.kerberosConfig.withTruststorePassword
withTruststorePassword(truststorePassword)
"Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc."
obj spec.forProvider.placement.managedCluster.config.softwareConfig
"Optional. The config settings for software inside the cluster."
fn spec.forProvider.placement.managedCluster.config.softwareConfig.withImageVersion
withImageVersion(imageVersion)
"Optional. The version of software inside the cluster. It must be one of the supported (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). If unspecified, it defaults to the latest Debian version."
fn spec.forProvider.placement.managedCluster.config.softwareConfig.withOptionalComponents
withOptionalComponents(optionalComponents)
fn spec.forProvider.placement.managedCluster.config.softwareConfig.withOptionalComponentsMixin
withOptionalComponentsMixin(optionalComponents)
Note: This function appends passed data to existing values
fn spec.forProvider.placement.managedCluster.config.softwareConfig.withProperties
withProperties(properties)
"Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten."
fn spec.forProvider.placement.managedCluster.config.softwareConfig.withPropertiesMixin
withPropertiesMixin(properties)
"Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten."
Note: This function appends passed data to existing values
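For orientation, here is a minimal sketch of how these setters compose. It assumes the usual jsonnet-libs conventions: top-level spec.forProvider setters merge into the resource with +, while nested block helpers (placement, managedCluster, config, softwareConfig, ...) build fragments that are passed to their parent's withX setter, which accepts either a single fragment or a list. The import path, image version, component, and property values are all hypothetical; later sketches in this reference reuse the same assumptions without restating them.

```jsonnet
// Hypothetical import path; adjust to wherever this library is vendored.
local gcp = import 'gcp-provider/main.libsonnet';
local template = gcp.dataproc.v1beta1.workflowTemplate;
local placement = template.spec.forProvider.placement;
local config = placement.managedCluster.config;

template.new('wt-software-config')
+ template.spec.forProvider.withLocation('us-central1')
+ template.spec.forProvider.withPlacement(
    placement.withManagedCluster(
      placement.managedCluster.withClusterName('wt-cluster')
      + placement.managedCluster.withConfig(
          config.withSoftwareConfig(
            config.softwareConfig.withImageVersion('2.1-debian11')            // illustrative image version
            + config.softwareConfig.withOptionalComponents(['ZEPPELIN'])      // illustrative optional component
            + config.softwareConfig.withProperties({ 'spark:spark.executor.memory': '4g' })  // prefix:property format
          )
        )
    )
  )
```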
obj spec.forProvider.placement.managedCluster.config.workerConfig
"Optional. The Compute Engine config settings for additional worker instances in a cluster."
fn spec.forProvider.placement.managedCluster.config.workerConfig.withAccelerators
withAccelerators(accelerators)
"Optional. The Compute Engine accelerator configuration for these instances."
fn spec.forProvider.placement.managedCluster.config.workerConfig.withAcceleratorsMixin
withAcceleratorsMixin(accelerators)
"Optional. The Compute Engine accelerator configuration for these instances."
Note: This function appends passed data to existing values
fn spec.forProvider.placement.managedCluster.config.workerConfig.withDiskConfig
withDiskConfig(diskConfig)
"Optional. Disk option config settings."
fn spec.forProvider.placement.managedCluster.config.workerConfig.withDiskConfigMixin
withDiskConfigMixin(diskConfig)
"Optional. Disk option config settings."
Note: This function appends passed data to existing values
fn spec.forProvider.placement.managedCluster.config.workerConfig.withImage
withImage(image)
"Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * https://www.googleapis.com/compute/beta/projects/ If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default."
fn spec.forProvider.placement.managedCluster.config.workerConfig.withMachineType
withMachineType(machineType)
"Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * https://www.googleapis.com/compute/v1/projects/(https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, n1-standard-2`."
fn spec.forProvider.placement.managedCluster.config.workerConfig.withMinCpuPlatform
withMinCpuPlatform(minCpuPlatform)
"Optional. Specifies the minimum cpu platform for the Instance Group. See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu)."
fn spec.forProvider.placement.managedCluster.config.workerConfig.withNumInstances
withNumInstances(numInstances)
"Optional. The number of VM instances in the instance group. For master instance groups, must be set to 1."
fn spec.forProvider.placement.managedCluster.config.workerConfig.withPreemptibility
withPreemptibility(preemptibility)
"Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is NON_PREEMPTIBLE. This default cannot be changed. The default value for secondary instances is PREEMPTIBLE. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE"
obj spec.forProvider.placement.managedCluster.config.workerConfig.accelerators
"Optional. The Compute Engine accelerator configuration for these instances."
fn spec.forProvider.placement.managedCluster.config.workerConfig.accelerators.withAcceleratorCount
withAcceleratorCount(acceleratorCount)
"The number of the accelerator cards of this type exposed to this instance."
fn spec.forProvider.placement.managedCluster.config.workerConfig.accelerators.withAcceleratorType
withAcceleratorType(acceleratorType)
"Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-k80."
obj spec.forProvider.placement.managedCluster.config.workerConfig.diskConfig
"Optional. Disk option config settings."
fn spec.forProvider.placement.managedCluster.config.workerConfig.diskConfig.withBootDiskSizeGb
withBootDiskSizeGb(bootDiskSizeGb)
"Optional. Size in GB of the boot disk (default is 500GB)."
fn spec.forProvider.placement.managedCluster.config.workerConfig.diskConfig.withBootDiskType
withBootDiskType(bootDiskType)
"Optional. Type of the boot disk (default is \"pd-standard\"). Valid values: \"pd-ssd\" (Persistent Disk Solid State Drive) or \"pd-standard\" (Persistent Disk Hard Disk Drive)."
fn spec.forProvider.placement.managedCluster.config.workerConfig.diskConfig.withNumLocalSsds
withNumLocalSsds(numLocalSsds)
"Optional. Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries."
obj spec.initProvider
"THIS IS AN ALPHA FIELD. Do not use it in production. It is not honored unless the relevant Crossplane feature flag is enabled, and may be changed or removed without notice. InitProvider holds the same fields as ForProvider, with the exception of Identifier and other resource reference fields. The fields that are in InitProvider are merged into ForProvider when the resource is created. The same fields are also added to the terraform ignore_changes hook, to avoid updating them after creation. This is useful for fields that are required on creation, but we do not desire to update them after creation, for example because of an external controller is managing them, like an autoscaler."
fn spec.initProvider.withDagTimeout
withDagTimeout(dagTimeout)
"(Beta only) Optional. Timeout duration for the DAG of jobs. You can use \"s\", \"m\", \"h\", and \"d\" suffixes for second, minute, hour, and day duration values, respectively. The timeout duration must be from 10 minutes (\"10m\") to 24 hours (\"24h\" or \"1d\"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a (/dataproc/docs/concepts/workflows/using-workflows#configuring_or_selecting_a_cluster), the cluster is deleted."
fn spec.initProvider.withJobs
withJobs(jobs)
"Required. The Directed Acyclic Graph of Jobs to submit."
fn spec.initProvider.withJobsMixin
withJobsMixin(jobs)
"Required. The Directed Acyclic Graph of Jobs to submit."
Note: This function appends passed data to existing values
fn spec.initProvider.withLabels
withLabels(labels)
"Optional. The labels to associate with this cluster. Label keys must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: {0,63} No more than 32 labels can be associated with a given cluster."
fn spec.initProvider.withLabelsMixin
withLabelsMixin(labels)
"Optional. The labels to associate with this cluster. Label keys must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: {0,63} No more than 32 labels can be associated with a given cluster."
Note: This function appends passed data to existing values
fn spec.initProvider.withParameters
withParameters(parameters)
"Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated."
fn spec.initProvider.withParametersMixin
withParametersMixin(parameters)
"Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated."
Note: This function appends passed data to existing values
fn spec.initProvider.withPlacement
withPlacement(placement)
"Required. WorkflowTemplate scheduling information."
fn spec.initProvider.withPlacementMixin
withPlacementMixin(placement)
"Required. WorkflowTemplate scheduling information."
Note: This function appends passed data to existing values
fn spec.initProvider.withProject
withProject(project)
"The project for the resource"
fn spec.initProvider.withVersion
withVersion(version)
"Optional. Used to perform a consistent read-modify-write. This field should be left blank for a CreateWorkflowTemplate request. It is required for an UpdateWorkflowTemplate request, and must match the current server version. A typical update template flow would fetch the current template with a GetWorkflowTemplate request, which will return the current template with the version field filled in with the current server version. The user updates other fields in the template, then returns it as part of the UpdateWorkflowTemplate request."
obj spec.initProvider.jobs
"Required. The Directed Acyclic Graph of Jobs to submit."
fn spec.initProvider.jobs.withHadoopJob
withHadoopJob(hadoopJob)
"Optional. Job is a Hadoop job."
fn spec.initProvider.jobs.withHadoopJobMixin
withHadoopJobMixin(hadoopJob)
"Optional. Job is a Hadoop job."
Note: This function appends passed data to existing values
fn spec.initProvider.jobs.withHiveJob
withHiveJob(hiveJob)
"Optional. Job is a Hive job."
fn spec.initProvider.jobs.withHiveJobMixin
withHiveJobMixin(hiveJob)
"Optional. Job is a Hive job."
Note: This function appends passed data to existing values
fn spec.initProvider.jobs.withLabels
withLabels(labels)
"Optional. The labels to associate with this job. Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: {0,63} No more than 32 labels can be associated with a given job."
fn spec.initProvider.jobs.withLabelsMixin
withLabelsMixin(labels)
"Optional. The labels to associate with this job. Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: {0,63} No more than 32 labels can be associated with a given job."
Note: This function appends passed data to existing values
fn spec.initProvider.jobs.withPigJob
withPigJob(pigJob)
"Optional. Job is a Pig job."
fn spec.initProvider.jobs.withPigJobMixin
withPigJobMixin(pigJob)
"Optional. Job is a Pig job."
Note: This function appends passed data to existing values
fn spec.initProvider.jobs.withPrerequisiteStepIds
withPrerequisiteStepIds(prerequisiteStepIds)
"Optional. The optional list of prerequisite job step_ids. If not specified, the job will start at the beginning of workflow."
fn spec.initProvider.jobs.withPrerequisiteStepIdsMixin
withPrerequisiteStepIdsMixin(prerequisiteStepIds)
"Optional. The optional list of prerequisite job step_ids. If not specified, the job will start at the beginning of workflow."
Note: This function appends passed data to existing values
fn spec.initProvider.jobs.withPrestoJob
withPrestoJob(prestoJob)
"Optional. Job is a Presto job."
fn spec.initProvider.jobs.withPrestoJobMixin
withPrestoJobMixin(prestoJob)
"Optional. Job is a Presto job."
Note: This function appends passed data to existing values
fn spec.initProvider.jobs.withPysparkJob
withPysparkJob(pysparkJob)
"Optional. Job is a PySpark job."
fn spec.initProvider.jobs.withPysparkJobMixin
withPysparkJobMixin(pysparkJob)
"Optional. Job is a PySpark job."
Note: This function appends passed data to existing values
fn spec.initProvider.jobs.withScheduling
withScheduling(scheduling)
"Optional. Job scheduling configuration."
fn spec.initProvider.jobs.withSchedulingMixin
withSchedulingMixin(scheduling)
"Optional. Job scheduling configuration."
Note: This function appends passed data to existing values
fn spec.initProvider.jobs.withSparkJob
withSparkJob(sparkJob)
"Optional. Job is a Spark job."
fn spec.initProvider.jobs.withSparkJobMixin
withSparkJobMixin(sparkJob)
"Optional. Job is a Spark job."
Note: This function appends passed data to existing values
fn spec.initProvider.jobs.withSparkRJob
withSparkRJob(sparkRJob)
"Optional. Job is a SparkR job."
fn spec.initProvider.jobs.withSparkRJobMixin
withSparkRJobMixin(sparkRJob)
"Optional. Job is a SparkR job."
Note: This function appends passed data to existing values
fn spec.initProvider.jobs.withSparkSqlJob
withSparkSqlJob(sparkSqlJob)
"Optional. Job is a SparkSql job."
fn spec.initProvider.jobs.withSparkSqlJobMixin
withSparkSqlJobMixin(sparkSqlJob)
"Optional. Job is a SparkSql job."
Note: This function appends passed data to existing values
fn spec.initProvider.jobs.withStepId
withStepId(stepId)
"Required. The step id. The id must be unique among all jobs within the template. The step id is used as prefix for job id, as job goog-dataproc-workflow-step-id label, and in field from other steps. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters."
obj spec.initProvider.jobs.hadoopJob
"Optional. Job is a Hadoop job."
fn spec.initProvider.jobs.hadoopJob.withArchiveUris
withArchiveUris(archiveUris)
"Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip."
fn spec.initProvider.jobs.hadoopJob.withArchiveUrisMixin
withArchiveUrisMixin(archiveUris)
"Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip."
Note: This function appends passed data to existing values
fn spec.initProvider.jobs.hadoopJob.withArgs
withArgs(args)
"Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission."
fn spec.initProvider.jobs.hadoopJob.withArgsMixin
withArgsMixin(args)
"Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission."
Note: This function appends passed data to existing values
fn spec.initProvider.jobs.hadoopJob.withFileUris
withFileUris(fileUris)
"Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks."
fn spec.initProvider.jobs.hadoopJob.withFileUrisMixin
withFileUrisMixin(fileUris)
"Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks."
Note: This function appends passed data to existing values
fn spec.initProvider.jobs.hadoopJob.withJarFileUris
withJarFileUris(jarFileUris)
"Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH."
fn spec.initProvider.jobs.hadoopJob.withJarFileUrisMixin
withJarFileUrisMixin(jarFileUris)
"Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH."
Note: This function appends passed data to existing values
fn spec.initProvider.jobs.hadoopJob.withLoggingConfig
withLoggingConfig(loggingConfig)
"Optional. The runtime log config for job execution."
fn spec.initProvider.jobs.hadoopJob.withLoggingConfigMixin
withLoggingConfigMixin(loggingConfig)
"Optional. The runtime log config for job execution."
Note: This function appends passed data to existing values
fn spec.initProvider.jobs.hadoopJob.withMainClass
withMainClass(mainClass)
"The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jar_file_uris."
fn spec.initProvider.jobs.hadoopJob.withMainJarFileUri
withMainJarFileUri(mainJarFileUri)
"The HCFS URI of the jar file that contains the main class."
fn spec.initProvider.jobs.hadoopJob.withProperties
withProperties(properties)
"Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten."
fn spec.initProvider.jobs.hadoopJob.withPropertiesMixin
withPropertiesMixin(properties)
"Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten."
Note: This function appends passed data to existing values
obj spec.initProvider.jobs.hadoopJob.loggingConfig
"Optional. The runtime log config for job execution."
fn spec.initProvider.jobs.hadoopJob.loggingConfig.withDriverLogLevels
withDriverLogLevels(driverLogLevels)
"The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'"
fn spec.initProvider.jobs.hadoopJob.loggingConfig.withDriverLogLevelsMixin
withDriverLogLevelsMixin(driverLogLevels)
"The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'"
Note: This function appends passed data to existing values
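A sketch of a single Hadoop step combining these helpers, under the same assumptions as the earlier sketches; the jar URI, arguments, buckets, and log levels are illustrative.

```jsonnet
local gcp = import 'gcp-provider/main.libsonnet';  // hypothetical import path
local template = gcp.dataproc.v1beta1.workflowTemplate;
local job = template.spec.initProvider.jobs;
local hadoop = job.hadoopJob;

local wordcount =
  job.withStepId('wordcount')
  + job.withHadoopJob(
      hadoop.withMainJarFileUri('file:///usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar')
      + hadoop.withArgs(['wordcount', 'gs://my-bucket/input/', 'gs://my-bucket/output/'])
      + hadoop.withLoggingConfig(
          hadoop.loggingConfig.withDriverLogLevels({ root: 'INFO' })  // per-package driver log levels
        )
    );

template.new('wt-hadoop')
+ template.spec.initProvider.withJobs([wordcount])
```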
obj spec.initProvider.jobs.hiveJob
"Optional. Job is a Hive job."
fn spec.initProvider.jobs.hiveJob.withContinueOnFailure
withContinueOnFailure(continueOnFailure)
"Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries."
fn spec.initProvider.jobs.hiveJob.withJarFileUris
withJarFileUris(jarFileUris)
"Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH."
fn spec.initProvider.jobs.hiveJob.withJarFileUrisMixin
withJarFileUrisMixin(jarFileUris)
"Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH."
Note: This function appends passed data to existing values
fn spec.initProvider.jobs.hiveJob.withProperties
withProperties(properties)
"Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten."
fn spec.initProvider.jobs.hiveJob.withPropertiesMixin
withPropertiesMixin(properties)
"Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten."
Note: This function appends passed data to existing values
fn spec.initProvider.jobs.hiveJob.withQueryFileUri
withQueryFileUri(queryFileUri)
"The HCFS URI of the script that contains SQL queries."
fn spec.initProvider.jobs.hiveJob.withQueryList
withQueryList(queryList)
"A list of queries."
fn spec.initProvider.jobs.hiveJob.withQueryListMixin
withQueryListMixin(queryList)
"A list of queries."
Note: This function appends passed data to existing values
fn spec.initProvider.jobs.hiveJob.withScriptVariables
withScriptVariables(scriptVariables)
"Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET name=\"value\";)."
fn spec.initProvider.jobs.hiveJob.withScriptVariablesMixin
withScriptVariablesMixin(scriptVariables)
"Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET name=\"value\";)."
Note: This function appends passed data to existing values
obj spec.initProvider.jobs.hiveJob.queryList
"A list of queries."
fn spec.initProvider.jobs.hiveJob.queryList.withQueries
withQueries(queries)
"Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": } }"
fn spec.initProvider.jobs.hiveJob.queryList.withQueriesMixin
withQueriesMixin(queries)
"Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": } }"
Note: This function appends passed data to existing values
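A sketch of a Hive step that runs inline queries via queryList, under the same assumptions; the queries and script variables are illustrative.

```jsonnet
local gcp = import 'gcp-provider/main.libsonnet';  // hypothetical import path
local template = gcp.dataproc.v1beta1.workflowTemplate;
local job = template.spec.initProvider.jobs;
local hive = job.hiveJob;

local hiveStep =
  job.withStepId('hive-queries')
  + job.withHiveJob(
      hive.withContinueOnFailure(false)
      + hive.withScriptVariables({ env: 'dev' })        // made available to the queries as a variable
      + hive.withQueryList(
          hive.queryList.withQueries([
            'SHOW DATABASES',
            'SELECT COUNT(*) FROM web_logs',             // illustrative query
          ])
        )
    );

template.new('wt-hive')
+ template.spec.initProvider.withJobs([hiveStep])
```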
obj spec.initProvider.jobs.pigJob
"Optional. Job is a Pig job."
fn spec.initProvider.jobs.pigJob.withContinueOnFailure
withContinueOnFailure(continueOnFailure)
"Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries."
fn spec.initProvider.jobs.pigJob.withJarFileUris
withJarFileUris(jarFileUris)
"Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH."
fn spec.initProvider.jobs.pigJob.withJarFileUrisMixin
withJarFileUrisMixin(jarFileUris)
"Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH."
Note: This function appends passed data to existing values
fn spec.initProvider.jobs.pigJob.withLoggingConfig
withLoggingConfig(loggingConfig)
"Optional. The runtime log config for job execution."
fn spec.initProvider.jobs.pigJob.withLoggingConfigMixin
withLoggingConfigMixin(loggingConfig)
"Optional. The runtime log config for job execution."
Note: This function appends passed data to existing values
fn spec.initProvider.jobs.pigJob.withProperties
withProperties(properties)
"Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten."
fn spec.initProvider.jobs.pigJob.withPropertiesMixin
withPropertiesMixin(properties)
"Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten."
Note: This function appends passed data to existing values
fn spec.initProvider.jobs.pigJob.withQueryFileUri
withQueryFileUri(queryFileUri)
"The HCFS URI of the script that contains SQL queries."
fn spec.initProvider.jobs.pigJob.withQueryList
withQueryList(queryList)
"A list of queries."
fn spec.initProvider.jobs.pigJob.withQueryListMixin
withQueryListMixin(queryList)
"A list of queries."
Note: This function appends passed data to existing values
fn spec.initProvider.jobs.pigJob.withScriptVariables
withScriptVariables(scriptVariables)
"Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET name=\"value\";)."
fn spec.initProvider.jobs.pigJob.withScriptVariablesMixin
withScriptVariablesMixin(scriptVariables)
"Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET name=\"value\";)."
Note: This function appends passed data to existing values
obj spec.initProvider.jobs.pigJob.loggingConfig
"Optional. The runtime log config for job execution."
fn spec.initProvider.jobs.pigJob.loggingConfig.withDriverLogLevels
withDriverLogLevels(driverLogLevels)
"The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'"
fn spec.initProvider.jobs.pigJob.loggingConfig.withDriverLogLevelsMixin
withDriverLogLevelsMixin(driverLogLevels)
"The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'"
Note: This function appends passed data to existing values
obj spec.initProvider.jobs.pigJob.queryList
"A list of queries."
fn spec.initProvider.jobs.pigJob.queryList.withQueries
withQueries(queries)
"Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": } }"
fn spec.initProvider.jobs.pigJob.queryList.withQueriesMixin
withQueriesMixin(queries)
"Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": } }"
Note: This function appends passed data to existing values
obj spec.initProvider.jobs.prestoJob
"Optional. Job is a Presto job."
fn spec.initProvider.jobs.prestoJob.withClientTags
withClientTags(clientTags)
"Optional. Presto client tags to attach to this query"
fn spec.initProvider.jobs.prestoJob.withClientTagsMixin
withClientTagsMixin(clientTags)
"Optional. Presto client tags to attach to this query"
Note: This function appends passed data to existing values
fn spec.initProvider.jobs.prestoJob.withContinueOnFailure
withContinueOnFailure(continueOnFailure)
"Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries."
fn spec.initProvider.jobs.prestoJob.withLoggingConfig
withLoggingConfig(loggingConfig)
"Optional. The runtime log config for job execution."
fn spec.initProvider.jobs.prestoJob.withLoggingConfigMixin
withLoggingConfigMixin(loggingConfig)
"Optional. The runtime log config for job execution."
Note: This function appends passed data to existing values
fn spec.initProvider.jobs.prestoJob.withOutputFormat
withOutputFormat(outputFormat)
"Optional. The format in which query output will be displayed. See the Presto documentation for supported output formats"
fn spec.initProvider.jobs.prestoJob.withProperties
withProperties(properties)
"Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten."
fn spec.initProvider.jobs.prestoJob.withPropertiesMixin
withPropertiesMixin(properties)
"Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten."
Note: This function appends passed data to existing values
fn spec.initProvider.jobs.prestoJob.withQueryFileUri
withQueryFileUri(queryFileUri)
"The HCFS URI of the script that contains SQL queries."
fn spec.initProvider.jobs.prestoJob.withQueryList
withQueryList(queryList)
"A list of queries."
fn spec.initProvider.jobs.prestoJob.withQueryListMixin
withQueryListMixin(queryList)
"A list of queries."
Note: This function appends passed data to existing values
obj spec.initProvider.jobs.prestoJob.loggingConfig
"Optional. The runtime log config for job execution."
fn spec.initProvider.jobs.prestoJob.loggingConfig.withDriverLogLevels
withDriverLogLevels(driverLogLevels)
"The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'"
fn spec.initProvider.jobs.prestoJob.loggingConfig.withDriverLogLevelsMixin
withDriverLogLevelsMixin(driverLogLevels)
"The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'"
Note: This function appends passed data to existing values
obj spec.initProvider.jobs.prestoJob.queryList
"A list of queries."
fn spec.initProvider.jobs.prestoJob.queryList.withQueries
withQueries(queries)
"Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": } }"
fn spec.initProvider.jobs.prestoJob.queryList.withQueriesMixin
withQueriesMixin(queries)
"Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": } }"
Note: This function appends passed data to existing values
obj spec.initProvider.jobs.pysparkJob
"Optional. Job is a PySpark job."
fn spec.initProvider.jobs.pysparkJob.withArchiveUris
withArchiveUris(archiveUris)
"Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip."
fn spec.initProvider.jobs.pysparkJob.withArchiveUrisMixin
withArchiveUrisMixin(archiveUris)
"Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip."
Note: This function appends passed data to existing values
fn spec.initProvider.jobs.pysparkJob.withArgs
withArgs(args)
"Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission."
fn spec.initProvider.jobs.pysparkJob.withArgsMixin
withArgsMixin(args)
"Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission."
Note: This function appends passed data to existing values
fn spec.initProvider.jobs.pysparkJob.withFileUris
withFileUris(fileUris)
"Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks."
fn spec.initProvider.jobs.pysparkJob.withFileUrisMixin
withFileUrisMixin(fileUris)
"Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks."
Note: This function appends passed data to existing values
fn spec.initProvider.jobs.pysparkJob.withJarFileUris
withJarFileUris(jarFileUris)
"Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH."
fn spec.initProvider.jobs.pysparkJob.withJarFileUrisMixin
withJarFileUrisMixin(jarFileUris)
"Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH."
Note: This function appends passed data to existing values
fn spec.initProvider.jobs.pysparkJob.withLoggingConfig
withLoggingConfig(loggingConfig)
"Optional. The runtime log config for job execution."
fn spec.initProvider.jobs.pysparkJob.withLoggingConfigMixin
withLoggingConfigMixin(loggingConfig)
"Optional. The runtime log config for job execution."
Note: This function appends passed data to existing values
fn spec.initProvider.jobs.pysparkJob.withMainPythonFileUri
withMainPythonFileUri(mainPythonFileUri)
"Required. The HCFS URI of the main Python file to use as the driver. Must be a .py file."
fn spec.initProvider.jobs.pysparkJob.withProperties
withProperties(properties)
"Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten."
fn spec.initProvider.jobs.pysparkJob.withPropertiesMixin
withPropertiesMixin(properties)
"Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten."
Note: This function appends passed data to existing values
fn spec.initProvider.jobs.pysparkJob.withPythonFileUris
withPythonFileUris(pythonFileUris)
"Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip."
fn spec.initProvider.jobs.pysparkJob.withPythonFileUrisMixin
withPythonFileUrisMixin(pythonFileUris)
"Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip."
Note: This function appends passed data to existing values
obj spec.initProvider.jobs.pysparkJob.loggingConfig
"Optional. The runtime log config for job execution."
fn spec.initProvider.jobs.pysparkJob.loggingConfig.withDriverLogLevels
withDriverLogLevels(driverLogLevels)
"The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'"
fn spec.initProvider.jobs.pysparkJob.loggingConfig.withDriverLogLevelsMixin
withDriverLogLevelsMixin(driverLogLevels)
"The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'"
Note: This function appends passed data to existing values
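A sketch of a PySpark step, under the same assumptions; the Cloud Storage paths, arguments, and Spark property are illustrative.

```jsonnet
local gcp = import 'gcp-provider/main.libsonnet';  // hypothetical import path
local template = gcp.dataproc.v1beta1.workflowTemplate;
local job = template.spec.initProvider.jobs;
local pyspark = job.pysparkJob;

local etl =
  job.withStepId('pyspark-etl')
  + job.withPysparkJob(
      pyspark.withMainPythonFileUri('gs://my-bucket/jobs/etl.py')       // driver script (illustrative path)
      + pyspark.withPythonFileUris(['gs://my-bucket/jobs/helpers.zip']) // extra Python dependencies
      + pyspark.withArgs(['--date', '2024-01-01'])
      + pyspark.withProperties({ 'spark.executor.instances': '4' })
    );

template.new('wt-pyspark')
+ template.spec.initProvider.withJobs([etl])
```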
obj spec.initProvider.jobs.scheduling
"Optional. Job scheduling configuration."
fn spec.initProvider.jobs.scheduling.withMaxFailuresPerHour
withMaxFailuresPerHour(maxFailuresPerHour)
"Optional. Maximum number of times per hour a driver may be restarted as a result of driver exiting with non-zero code before job is reported failed. A job may be reported as thrashing if driver exits with non-zero code 4 times within 10 minute window. Maximum value is 10."
fn spec.initProvider.jobs.scheduling.withMaxFailuresTotal
withMaxFailuresTotal(maxFailuresTotal)
"Optional. Maximum number of times in total a driver may be restarted as a result of driver exiting with non-zero code before job is reported failed. Maximum value is 240"
obj spec.initProvider.jobs.sparkJob
"Optional. Job is a Spark job."
fn spec.initProvider.jobs.sparkJob.withArchiveUris
withArchiveUris(archiveUris)
"Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip."
fn spec.initProvider.jobs.sparkJob.withArchiveUrisMixin
withArchiveUrisMixin(archiveUris)
"Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip."
Note: This function appends passed data to existing values
fn spec.initProvider.jobs.sparkJob.withArgs
withArgs(args)
"Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission."
fn spec.initProvider.jobs.sparkJob.withArgsMixin
withArgsMixin(args)
"Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission."
Note: This function appends passed data to existing values
fn spec.initProvider.jobs.sparkJob.withFileUris
withFileUris(fileUris)
"Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks."
fn spec.initProvider.jobs.sparkJob.withFileUrisMixin
withFileUrisMixin(fileUris)
"Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks."
Note: This function appends passed data to existing values
fn spec.initProvider.jobs.sparkJob.withJarFileUris
withJarFileUris(jarFileUris)
"Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH."
fn spec.initProvider.jobs.sparkJob.withJarFileUrisMixin
withJarFileUrisMixin(jarFileUris)
"Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH."
Note: This function appends passed data to existing values
fn spec.initProvider.jobs.sparkJob.withLoggingConfig
withLoggingConfig(loggingConfig)
"Optional. The runtime log config for job execution."
fn spec.initProvider.jobs.sparkJob.withLoggingConfigMixin
withLoggingConfigMixin(loggingConfig)
"Optional. The runtime log config for job execution."
Note: This function appends passed data to existing values
fn spec.initProvider.jobs.sparkJob.withMainClass
withMainClass(mainClass)
"The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jar_file_uris."
fn spec.initProvider.jobs.sparkJob.withMainJarFileUri
withMainJarFileUri(mainJarFileUri)
"The HCFS URI of the jar file that contains the main class."
fn spec.initProvider.jobs.sparkJob.withProperties
withProperties(properties)
"Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten."
fn spec.initProvider.jobs.sparkJob.withPropertiesMixin
withPropertiesMixin(properties)
"Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten."
Note: This function appends passed data to existing values
obj spec.initProvider.jobs.sparkJob.loggingConfig
"Optional. The runtime log config for job execution."
fn spec.initProvider.jobs.sparkJob.loggingConfig.withDriverLogLevels
withDriverLogLevels(driverLogLevels)
"The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'"
fn spec.initProvider.jobs.sparkJob.loggingConfig.withDriverLogLevelsMixin
withDriverLogLevelsMixin(driverLogLevels)
"The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'"
Note: This function appends passed data to existing values
obj spec.initProvider.jobs.sparkRJob
"Optional. Job is a SparkR job."
fn spec.initProvider.jobs.sparkRJob.withArchiveUris
withArchiveUris(archiveUris)
"Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip."
fn spec.initProvider.jobs.sparkRJob.withArchiveUrisMixin
withArchiveUrisMixin(archiveUris)
"Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip."
Note: This function appends passed data to existing values
fn spec.initProvider.jobs.sparkRJob.withArgs
withArgs(args)
"Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission."
fn spec.initProvider.jobs.sparkRJob.withArgsMixin
withArgsMixin(args)
"Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission."
Note: This function appends passed data to existing values
fn spec.initProvider.jobs.sparkRJob.withFileUris
withFileUris(fileUris)
"Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks."
fn spec.initProvider.jobs.sparkRJob.withFileUrisMixin
withFileUrisMixin(fileUris)
"Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks."
Note: This function appends passed data to existing values
fn spec.initProvider.jobs.sparkRJob.withLoggingConfig
withLoggingConfig(loggingConfig)
"Optional. The runtime log config for job execution."
fn spec.initProvider.jobs.sparkRJob.withLoggingConfigMixin
withLoggingConfigMixin(loggingConfig)
"Optional. The runtime log config for job execution."
Note: This function appends passed data to existing values
fn spec.initProvider.jobs.sparkRJob.withMainRFileUri
withMainRFileUri(mainRFileUri)
"Required. The HCFS URI of the main R file to use as the driver. Must be a .R file."
fn spec.initProvider.jobs.sparkRJob.withProperties
withProperties(properties)
"Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten."
fn spec.initProvider.jobs.sparkRJob.withPropertiesMixin
withPropertiesMixin(properties)
"Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten."
Note: This function appends passed data to existing values
obj spec.initProvider.jobs.sparkRJob.loggingConfig
"Optional. The runtime log config for job execution."
fn spec.initProvider.jobs.sparkRJob.loggingConfig.withDriverLogLevels
withDriverLogLevels(driverLogLevels)
"The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'"
fn spec.initProvider.jobs.sparkRJob.loggingConfig.withDriverLogLevelsMixin
withDriverLogLevelsMixin(driverLogLevels)
"The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'"
Note: This function appends passed data to existing values
obj spec.initProvider.jobs.sparkSqlJob
"Optional. Job is a SparkSql job."
fn spec.initProvider.jobs.sparkSqlJob.withJarFileUris
withJarFileUris(jarFileUris)
"Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH."
fn spec.initProvider.jobs.sparkSqlJob.withJarFileUrisMixin
withJarFileUrisMixin(jarFileUris)
"Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH."
Note: This function appends passed data to existing values
fn spec.initProvider.jobs.sparkSqlJob.withLoggingConfig
withLoggingConfig(loggingConfig)
"Optional. The runtime log config for job execution."
fn spec.initProvider.jobs.sparkSqlJob.withLoggingConfigMixin
withLoggingConfigMixin(loggingConfig)
"Optional. The runtime log config for job execution."
Note: This function appends passed data to existing values
fn spec.initProvider.jobs.sparkSqlJob.withProperties
withProperties(properties)
"Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten."
fn spec.initProvider.jobs.sparkSqlJob.withPropertiesMixin
withPropertiesMixin(properties)
"Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten."
Note: This function appends passed data to existing values
fn spec.initProvider.jobs.sparkSqlJob.withQueryFileUri
withQueryFileUri(queryFileUri)
"The HCFS URI of the script that contains SQL queries."
fn spec.initProvider.jobs.sparkSqlJob.withQueryList
withQueryList(queryList)
"A list of queries."
fn spec.initProvider.jobs.sparkSqlJob.withQueryListMixin
withQueryListMixin(queryList)
"A list of queries."
Note: This function appends passed data to existing values
fn spec.initProvider.jobs.sparkSqlJob.withScriptVariables
withScriptVariables(scriptVariables)
"Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET name=\"value\";)."
fn spec.initProvider.jobs.sparkSqlJob.withScriptVariablesMixin
withScriptVariablesMixin(scriptVariables)
"Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET name=\"value\";)."
Note: This function appends passed data to existing values
obj spec.initProvider.jobs.sparkSqlJob.loggingConfig
"Optional. The runtime log config for job execution."
fn spec.initProvider.jobs.sparkSqlJob.loggingConfig.withDriverLogLevels
withDriverLogLevels(driverLogLevels)
"The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'"
fn spec.initProvider.jobs.sparkSqlJob.loggingConfig.withDriverLogLevelsMixin
withDriverLogLevelsMixin(driverLogLevels)
"The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'"
Note: This function appends passed data to existing values
obj spec.initProvider.jobs.sparkSqlJob.queryList
"A list of queries."
fn spec.initProvider.jobs.sparkSqlJob.queryList.withQueries
withQueries(queries)
"Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": } }"
fn spec.initProvider.jobs.sparkSqlJob.queryList.withQueriesMixin
withQueriesMixin(queries)
"Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": } }"
Note: This function appends passed data to existing values
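A sketch of a Spark SQL step that pairs scriptVariables with an inline queryList, under the same assumptions; the variable, table names, and query are illustrative.

```jsonnet
local gcp = import 'gcp-provider/main.libsonnet';  // hypothetical import path
local job = gcp.dataproc.v1beta1.workflowTemplate.spec.initProvider.jobs;
local sparkSql = job.sparkSqlJob;

local nightly =
  job.withStepId('nightly-sql')
  + job.withSparkSqlJob(
      sparkSql.withScriptVariables({ run_date: '2024-01-01' })   // equivalent to SET run_date="2024-01-01";
      + sparkSql.withQueryList(
          sparkSql.queryList.withQueries([
            'CREATE TABLE IF NOT EXISTS daily_summary AS SELECT * FROM events WHERE dt = "${run_date}"',
          ])
        )
    );

nightly  // pass this element to ...withJobs([...]) as in the earlier sketches
```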
obj spec.initProvider.parameters
"Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated."
fn spec.initProvider.parameters.withDescription
withDescription(description)
"Optional. Brief description of the parameter. Must not exceed 1024 characters."
fn spec.initProvider.parameters.withFields
withFields(fields)
"Required. Paths to all fields that the parameter replaces. A field is allowed to appear in at most one parameter's list of field paths. A field path is similar in syntax to a .sparkJob.args"
fn spec.initProvider.parameters.withFieldsMixin
withFieldsMixin(fields)
"Required. Paths to all fields that the parameter replaces. A field is allowed to appear in at most one parameter's list of field paths. A field path is similar in syntax to a .sparkJob.args"
Note: This function appends passed data to existing values
fn spec.initProvider.parameters.withName
withName(name)
"Required. Parameter name. The parameter name is used as the key, and paired with the parameter value, which are passed to the template when the template is instantiated. The name must contain only capital letters (A-Z), numbers (0-9), and underscores (_), and must not start with a number. The maximum length is 40 characters."
fn spec.initProvider.parameters.withValidation
withValidation(validation)
"Optional. Validation rules to be applied to this parameter's value."
fn spec.initProvider.parameters.withValidationMixin
withValidationMixin(validation)
"Optional. Validation rules to be applied to this parameter's value."
Note: This function appends passed data to existing values
obj spec.initProvider.parameters.validation
"Optional. Validation rules to be applied to this parameter's value."
fn spec.initProvider.parameters.validation.withRegex
withRegex(regex)
"Validation based on regular expressions."
fn spec.initProvider.parameters.validation.withRegexMixin
withRegexMixin(regex)
"Validation based on regular expressions."
Note: This function appends passed data to existing values
fn spec.initProvider.parameters.validation.withValues
withValues(values)
"Required. List of allowed values for the parameter."
fn spec.initProvider.parameters.validation.withValuesMixin
withValuesMixin(values)
"Required. List of allowed values for the parameter."
Note: This function appends passed data to existing values
obj spec.initProvider.parameters.validation.regex
"Validation based on regular expressions."
fn spec.initProvider.parameters.validation.regex.withRegexes
withRegexes(regexes)
"Required. RE2 regular expressions used to validate the parameter's value. The value must match the regex in its entirety (substring matches are not sufficient)."
fn spec.initProvider.parameters.validation.regex.withRegexesMixin
withRegexesMixin(regexes)
"Required. RE2 regular expressions used to validate the parameter's value. The value must match the regex in its entirety (substring matches are not sufficient)."
Note: This function appends passed data to existing values
obj spec.initProvider.parameters.validation.values
"Required. List of allowed values for the parameter."
fn spec.initProvider.parameters.validation.values.withValues
withValues(values)
"Required. List of allowed values for the parameter."
fn spec.initProvider.parameters.validation.values.withValuesMixin
withValuesMixin(values)
"Required. List of allowed values for the parameter."
Note: This function appends passed data to existing values
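A sketch of a template parameter with value validation, under the same assumptions; the parameter name, field path, and allowed values are illustrative.

```jsonnet
local gcp = import 'gcp-provider/main.libsonnet';  // hypothetical import path
local template = gcp.dataproc.v1beta1.workflowTemplate;
local parameter = template.spec.initProvider.parameters;

// A parameter that substitutes the zone used by the cluster selector,
// restricted to two allowed values at instantiation time.
local zoneParam =
  parameter.withName('ZONE')
  + parameter.withFields(['placement.clusterSelector.zone'])
  + parameter.withDescription('Zone to run the workflow in.')
  + parameter.withValidation(
      parameter.validation.withValues(
        parameter.validation.values.withValues(['us-central1-a', 'us-central1-b'])
      )
    );

template.new('wt-parameterized')
+ template.spec.initProvider.withParameters([zoneParam])
```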
obj spec.initProvider.placement
"Required. WorkflowTemplate scheduling information."
fn spec.initProvider.placement.withClusterSelector
withClusterSelector(clusterSelector)
"Optional. A selector that chooses target cluster for jobs based on metadata. The selector is evaluated at the time each job is submitted."
fn spec.initProvider.placement.withClusterSelectorMixin
withClusterSelectorMixin(clusterSelector)
"Optional. A selector that chooses target cluster for jobs based on metadata. The selector is evaluated at the time each job is submitted."
Note: This function appends passed data to existing values
fn spec.initProvider.placement.withManagedCluster
withManagedCluster(managedCluster)
"A cluster that is managed by the workflow."
fn spec.initProvider.placement.withManagedClusterMixin
withManagedClusterMixin(managedCluster)
"A cluster that is managed by the workflow."
Note: This function appends passed data to existing values
obj spec.initProvider.placement.clusterSelector
"Optional. A selector that chooses target cluster for jobs based on metadata. The selector is evaluated at the time each job is submitted."
fn spec.initProvider.placement.clusterSelector.withClusterLabels
withClusterLabels(clusterLabels)
"Required. The cluster labels. Cluster must have all labels to match."
fn spec.initProvider.placement.clusterSelector.withClusterLabelsMixin
withClusterLabelsMixin(clusterLabels)
"Required. The cluster labels. Cluster must have all labels to match."
Note: This function appends passed data to existing values
fn spec.initProvider.placement.clusterSelector.withZone
withZone(zone)
"Optional. The zone where the Compute Engine cluster will be located. On a create request, it is required in the \"global\" region. If omitted in a non-global Dataproc region, the service will pick a zone in the corresponding Compute Engine region. On a get request, zone will always be present. A full URL, partial URI, or short name are valid. Examples: * https://www.googleapis.com/compute/v1/projects/ * us-central1-f"
obj spec.initProvider.placement.managedCluster
"A cluster that is managed by the workflow."
fn spec.initProvider.placement.managedCluster.withClusterName
withClusterName(clusterName)
"Required. The cluster name prefix. A unique cluster name will be formed by appending a random suffix. The name must contain only lower-case letters (a-z), numbers (0-9), and hyphens (-). Must begin with a letter. Cannot begin or end with hyphen. Must consist of between 2 and 35 characters."
fn spec.initProvider.placement.managedCluster.withConfig
withConfig(config)
"Required. The cluster configuration."
fn spec.initProvider.placement.managedCluster.withConfigMixin
withConfigMixin(config)
"Required. The cluster configuration."
Note: This function appends passed data to existing values
fn spec.initProvider.placement.managedCluster.withLabels
withLabels(labels)
"Optional. The labels to associate with this cluster. Label keys must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: {0,63} No more than 32 labels can be associated with a given cluster."
fn spec.initProvider.placement.managedCluster.withLabelsMixin
withLabelsMixin(labels)
"Optional. The labels to associate with this cluster. Label keys must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: {0,63} No more than 32 labels can be associated with a given cluster."
Note: This function appends passed data to existing values
obj spec.initProvider.placement.managedCluster.config
"Required. The cluster configuration."
fn spec.initProvider.placement.managedCluster.config.withAutoscalingConfig
withAutoscalingConfig(autoscalingConfig)
"Optional. Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset."
fn spec.initProvider.placement.managedCluster.config.withAutoscalingConfigMixin
withAutoscalingConfigMixin(autoscalingConfig)
"Optional. Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset."
Note: This function appends passed data to existing values
fn spec.initProvider.placement.managedCluster.config.withEncryptionConfig
withEncryptionConfig(encryptionConfig)
"Optional. Encryption settings for the cluster."
fn spec.initProvider.placement.managedCluster.config.withEncryptionConfigMixin
withEncryptionConfigMixin(encryptionConfig)
"Optional. Encryption settings for the cluster."
Note: This function appends passed data to existing values
fn spec.initProvider.placement.managedCluster.config.withEndpointConfig
withEndpointConfig(endpointConfig)
"Optional. Port/endpoint configuration for this cluster"
fn spec.initProvider.placement.managedCluster.config.withEndpointConfigMixin
withEndpointConfigMixin(endpointConfig)
"Optional. Port/endpoint configuration for this cluster"
Note: This function appends passed data to existing values
fn spec.initProvider.placement.managedCluster.config.withGceClusterConfig
withGceClusterConfig(gceClusterConfig)
"Optional. The shared Compute Engine config settings for all instances in a cluster."
fn spec.initProvider.placement.managedCluster.config.withGceClusterConfigMixin
withGceClusterConfigMixin(gceClusterConfig)
"Optional. The shared Compute Engine config settings for all instances in a cluster."
Note: This function appends passed data to existing values
fn spec.initProvider.placement.managedCluster.config.withInitializationActions
withInitializationActions(initializationActions)
"Optional. Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's role metadata to run an executable on a master or worker node, as shown below using curl (you can also use wget): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if ; then ... master specific actions ... else ... worker specific actions ... fi"
fn spec.initProvider.placement.managedCluster.config.withInitializationActionsMixin
withInitializationActionsMixin(initializationActions)
"Optional. Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's role metadata to run an executable on a master or worker node, as shown below using curl (you can also use wget): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if ; then ... master specific actions ... else ... worker specific actions ... fi"
Note: This function appends passed data to existing values
fn spec.initProvider.placement.managedCluster.config.withLifecycleConfig
withLifecycleConfig(lifecycleConfig)
"Optional. Lifecycle setting for the cluster."
fn spec.initProvider.placement.managedCluster.config.withLifecycleConfigMixin
withLifecycleConfigMixin(lifecycleConfig)
"Optional. Lifecycle setting for the cluster."
Note: This function appends passed data to existing values
fn spec.initProvider.placement.managedCluster.config.withMasterConfig
withMasterConfig(masterConfig)
"Optional. The Compute Engine config settings for additional worker instances in a cluster."
fn spec.initProvider.placement.managedCluster.config.withMasterConfigMixin
withMasterConfigMixin(masterConfig)
"Optional. The Compute Engine config settings for additional worker instances in a cluster."
Note: This function appends passed data to existing values
fn spec.initProvider.placement.managedCluster.config.withSecondaryWorkerConfig
withSecondaryWorkerConfig(secondaryWorkerConfig)
"Optional. The Compute Engine config settings for additional worker instances in a cluster."
fn spec.initProvider.placement.managedCluster.config.withSecondaryWorkerConfigMixin
withSecondaryWorkerConfigMixin(secondaryWorkerConfig)
"Optional. The Compute Engine config settings for additional worker instances in a cluster."
Note: This function appends passed data to existing values
fn spec.initProvider.placement.managedCluster.config.withSecurityConfig
withSecurityConfig(securityConfig)
"Optional. Security settings for the cluster."
fn spec.initProvider.placement.managedCluster.config.withSecurityConfigMixin
withSecurityConfigMixin(securityConfig)
"Optional. Security settings for the cluster."
Note: This function appends passed data to existing values
fn spec.initProvider.placement.managedCluster.config.withSoftwareConfig
withSoftwareConfig(softwareConfig)
"Optional. The config settings for software inside the cluster."
fn spec.initProvider.placement.managedCluster.config.withSoftwareConfigMixin
withSoftwareConfigMixin(softwareConfig)
"Optional. The config settings for software inside the cluster."
Note: This function appends passed data to existing values
fn spec.initProvider.placement.managedCluster.config.withStagingBucket
withStagingBucket(stagingBucket)
"Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket))."
fn spec.initProvider.placement.managedCluster.config.withTempBucket
withTempBucket(tempBucket)
"Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket."
fn spec.initProvider.placement.managedCluster.config.withWorkerConfig
withWorkerConfig(workerConfig)
"Optional. The Compute Engine config settings for additional worker instances in a cluster."
fn spec.initProvider.placement.managedCluster.config.withWorkerConfigMixin
withWorkerConfigMixin(workerConfig)
"Optional. The Compute Engine config settings for additional worker instances in a cluster."
Note: This function appends passed data to existing values
obj spec.initProvider.placement.managedCluster.config.autoscalingConfig
"Optional. Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset."
fn spec.initProvider.placement.managedCluster.config.autoscalingConfig.withPolicy
withPolicy(policy)
"Optional. The autoscaling policy used by the cluster. Only resource names including projectid and location (region) are valid. Examples: * https://www.googleapis.com/compute/v1/projects/ Note that the policy must be in the same project and Dataproc region."
obj spec.initProvider.placement.managedCluster.config.encryptionConfig
"Optional. Encryption settings for the cluster."
fn spec.initProvider.placement.managedCluster.config.encryptionConfig.withGcePdKmsKeyName
withGcePdKmsKeyName(gcePdKmsKeyName)
"Optional. The Cloud KMS key name to use for PD disk encryption for all instances in the cluster."
obj spec.initProvider.placement.managedCluster.config.endpointConfig
"Optional. Port/endpoint configuration for this cluster"
fn spec.initProvider.placement.managedCluster.config.endpointConfig.withEnableHttpPortAccess
withEnableHttpPortAccess(enableHttpPortAccess)
"Optional. If true, enable http access to specific ports on the cluster from external sources. Defaults to false."
obj spec.initProvider.placement.managedCluster.config.gceClusterConfig
"Optional. The shared Compute Engine config settings for all instances in a cluster."
fn spec.initProvider.placement.managedCluster.config.gceClusterConfig.withInternalIpOnly
withInternalIpOnly(internalIpOnly)
"Optional. If true, all instances in the cluster will only have internal IP addresses. By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. This internal_ip_only restriction can only be enabled for subnetwork enabled networks, and all off-cluster dependencies must be configured to be accessible without external IP addresses."
fn spec.initProvider.placement.managedCluster.config.gceClusterConfig.withMetadata
withMetadata(metadata)
"The Compute Engine metadata entries to add to all instances (see (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata))."
fn spec.initProvider.placement.managedCluster.config.gceClusterConfig.withMetadataMixin
withMetadataMixin(metadata)
"The Compute Engine metadata entries to add to all instances (see (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata))."
Note: This function appends passed data to existing values
fn spec.initProvider.placement.managedCluster.config.gceClusterConfig.withNetwork
withNetwork(network)
"Optional. The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither network_uri nor subnetwork_uri is specified, the \"default\" network of the project is used, if it exists. Cannot be a \"Custom Subnet Network\" (see /regions/global/default*default`"
fn spec.initProvider.placement.managedCluster.config.gceClusterConfig.withNodeGroupAffinity
withNodeGroupAffinity(nodeGroupAffinity)
"Optional. Node Group Affinity for sole-tenant clusters."
fn spec.initProvider.placement.managedCluster.config.gceClusterConfig.withNodeGroupAffinityMixin
withNodeGroupAffinityMixin(nodeGroupAffinity)
"Optional. Node Group Affinity for sole-tenant clusters."
Note: This function appends passed data to existing values
fn spec.initProvider.placement.managedCluster.config.gceClusterConfig.withPrivateIpv6GoogleAccess
withPrivateIpv6GoogleAccess(privateIpv6GoogleAccess)
"Optional. The type of IPv6 access for a cluster. Possible values: PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED, INHERIT_FROM_SUBNETWORK, OUTBOUND, BIDIRECTIONAL"
fn spec.initProvider.placement.managedCluster.config.gceClusterConfig.withReservationAffinity
withReservationAffinity(reservationAffinity)
"Optional. Reservation Affinity for consuming Zonal reservation."
fn spec.initProvider.placement.managedCluster.config.gceClusterConfig.withReservationAffinityMixin
withReservationAffinityMixin(reservationAffinity)
"Optional. Reservation Affinity for consuming Zonal reservation."
Note: This function appends passed data to existing values
fn spec.initProvider.placement.managedCluster.config.gceClusterConfig.withServiceAccount
withServiceAccount(serviceAccount)
"Optional. The (https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) is used."
fn spec.initProvider.placement.managedCluster.config.gceClusterConfig.withServiceAccountScopes
withServiceAccountScopes(serviceAccountScopes)
"Optional. The URIs of service account scopes to be included in Compute Engine instances. The following base set of scopes is always included: * https://www.googleapis.com/auth/cloud.useraccounts.readonly * https://www.googleapis.com/auth/devstorage.read_write * https://www.googleapis.com/auth/logging.write If no scopes are specified, the following defaults are also provided: * https://www.googleapis.com/auth/bigquery * https://www.googleapis.com/auth/bigtable.admin.table * https://www.googleapis.com/auth/bigtable.data * https://www.googleapis.com/auth/devstorage.full_control"
fn spec.initProvider.placement.managedCluster.config.gceClusterConfig.withServiceAccountScopesMixin
withServiceAccountScopesMixin(serviceAccountScopes)
"Optional. The URIs of service account scopes to be included in Compute Engine instances. The following base set of scopes is always included: * https://www.googleapis.com/auth/cloud.useraccounts.readonly * https://www.googleapis.com/auth/devstorage.read_write * https://www.googleapis.com/auth/logging.write If no scopes are specified, the following defaults are also provided: * https://www.googleapis.com/auth/bigquery * https://www.googleapis.com/auth/bigtable.admin.table * https://www.googleapis.com/auth/bigtable.data * https://www.googleapis.com/auth/devstorage.full_control"
Note: This function appends passed data to existing values
fn spec.initProvider.placement.managedCluster.config.gceClusterConfig.withShieldedInstanceConfig
withShieldedInstanceConfig(shieldedInstanceConfig)
"Optional. Shielded Instance Config for clusters using Compute Engine Shielded VMs. Structure defined below."
fn spec.initProvider.placement.managedCluster.config.gceClusterConfig.withShieldedInstanceConfigMixin
withShieldedInstanceConfigMixin(shieldedInstanceConfig)
"Optional. Shielded Instance Config for clusters using Compute Engine Shielded VMs. Structure defined below."
Note: This function appends passed data to existing values
fn spec.initProvider.placement.managedCluster.config.gceClusterConfig.withSubnetwork
withSubnetwork(subnetwork)
"Optional. The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri. A full URL, partial URI, or short name are valid. Examples: * https://www.googleapis.com/compute/v1/projects//regions/us-east1/subnetworks/sub0 * sub0"
fn spec.initProvider.placement.managedCluster.config.gceClusterConfig.withTags
withTags(tags)
"The Compute Engine tags to add to all instances (see (https://cloud.google.com/compute/docs/label-or-tag-resources#tags))."
fn spec.initProvider.placement.managedCluster.config.gceClusterConfig.withTagsMixin
withTagsMixin(tags)
"The Compute Engine tags to add to all instances (see (https://cloud.google.com/compute/docs/label-or-tag-resources#tags))."
Note: This function appends passed data to existing values
fn spec.initProvider.placement.managedCluster.config.gceClusterConfig.withZone
withZone(zone)
"Optional. The zone where the Compute Engine cluster will be located. On a create request, it is required in the \"global\" region. If omitted in a non-global Dataproc region, the service will pick a zone in the corresponding Compute Engine region. On a get request, zone will always be present. A full URL, partial URI, or short name are valid. Examples: * https://www.googleapis.com/compute/v1/projects/ * us-central1-f"
obj spec.initProvider.placement.managedCluster.config.gceClusterConfig.nodeGroupAffinity
"Optional. Node Group Affinity for sole-tenant clusters."
fn spec.initProvider.placement.managedCluster.config.gceClusterConfig.nodeGroupAffinity.withNodeGroup
withNodeGroup(nodeGroup)
"Required. The URI of a sole-tenant /zones/us-central1-a/nodeGroups/node-group-1*node-group-1`"
obj spec.initProvider.placement.managedCluster.config.gceClusterConfig.reservationAffinity
"Optional. Reservation Affinity for consuming Zonal reservation."
fn spec.initProvider.placement.managedCluster.config.gceClusterConfig.reservationAffinity.withConsumeReservationType
withConsumeReservationType(consumeReservationType)
"Optional. Type of reservation to consume Possible values: TYPE_UNSPECIFIED, NO_RESERVATION, ANY_RESERVATION, SPECIFIC_RESERVATION"
fn spec.initProvider.placement.managedCluster.config.gceClusterConfig.reservationAffinity.withKey
withKey(key)
"Optional. Corresponds to the label key of reservation resource."
fn spec.initProvider.placement.managedCluster.config.gceClusterConfig.reservationAffinity.withValues
withValues(values)
"Required. List of allowed values for the parameter."
fn spec.initProvider.placement.managedCluster.config.gceClusterConfig.reservationAffinity.withValuesMixin
withValuesMixin(values)
"Required. List of allowed values for the parameter."
Note: This function appends passed data to existing values
obj spec.initProvider.placement.managedCluster.config.gceClusterConfig.shieldedInstanceConfig
"Optional. Shielded Instance Config for clusters using Compute Engine Shielded VMs. Structure defined below."
fn spec.initProvider.placement.managedCluster.config.gceClusterConfig.shieldedInstanceConfig.withEnableIntegrityMonitoring
withEnableIntegrityMonitoring(enableIntegrityMonitoring)
"Optional. Defines whether instances have Integrity Monitoring enabled."
fn spec.initProvider.placement.managedCluster.config.gceClusterConfig.shieldedInstanceConfig.withEnableSecureBoot
withEnableSecureBoot(enableSecureBoot)
"Optional. Defines whether instances have Secure Boot enabled."
fn spec.initProvider.placement.managedCluster.config.gceClusterConfig.shieldedInstanceConfig.withEnableVtpm
withEnableVtpm(enableVtpm)
"Optional. Defines whether instances have the vTPM enabled."
obj spec.initProvider.placement.managedCluster.config.initializationActions
"Optional. Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's role metadata to run an executable on a master or worker node, as shown below using curl (you can also use wget): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if ; then ... master specific actions ... else ... worker specific actions ... fi"
fn spec.initProvider.placement.managedCluster.config.initializationActions.withExecutableFile
withExecutableFile(executableFile)
"Required. Cloud Storage URI of executable file."
fn spec.initProvider.placement.managedCluster.config.initializationActions.withExecutionTimeout
withExecutionTimeout(executionTimeout)
"Optional. Amount of time executable has to complete. Default is 10 minutes (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period."
obj spec.initProvider.placement.managedCluster.config.lifecycleConfig
"Optional. Lifecycle setting for the cluster."
fn spec.initProvider.placement.managedCluster.config.lifecycleConfig.withAutoDeleteTime
withAutoDeleteTime(autoDeleteTime)
"Optional. The time when cluster will be auto-deleted (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json))."
fn spec.initProvider.placement.managedCluster.config.lifecycleConfig.withAutoDeleteTtl
withAutoDeleteTtl(autoDeleteTtl)
"Optional. The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json))."
fn spec.initProvider.placement.managedCluster.config.lifecycleConfig.withIdleDeleteTtl
withIdleDeleteTtl(idleDeleteTtl)
"Optional. The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)."
obj spec.initProvider.placement.managedCluster.config.masterConfig
"Optional. The Compute Engine config settings for additional worker instances in a cluster."
fn spec.initProvider.placement.managedCluster.config.masterConfig.withAccelerators
withAccelerators(accelerators)
"Optional. The Compute Engine accelerator configuration for these instances."
fn spec.initProvider.placement.managedCluster.config.masterConfig.withAcceleratorsMixin
withAcceleratorsMixin(accelerators)
"Optional. The Compute Engine accelerator configuration for these instances."
Note: This function appends passed data to existing values
fn spec.initProvider.placement.managedCluster.config.masterConfig.withDiskConfig
withDiskConfig(diskConfig)
"Optional. Disk option config settings."
fn spec.initProvider.placement.managedCluster.config.masterConfig.withDiskConfigMixin
withDiskConfigMixin(diskConfig)
"Optional. Disk option config settings."
Note: This function appends passed data to existing values
fn spec.initProvider.placement.managedCluster.config.masterConfig.withImage
withImage(image)
"Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * https://www.googleapis.com/compute/beta/projects/ If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default."
fn spec.initProvider.placement.managedCluster.config.masterConfig.withMachineType
withMachineType(machineType)
"Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * https://www.googleapis.com/compute/v1/projects/(https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, n1-standard-2`."
fn spec.initProvider.placement.managedCluster.config.masterConfig.withMinCpuPlatform
withMinCpuPlatform(minCpuPlatform)
"Optional. Specifies the minimum cpu platform for the Instance Group. See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu)."
fn spec.initProvider.placement.managedCluster.config.masterConfig.withNumInstances
withNumInstances(numInstances)
"Optional. The number of VM instances in the instance group. For master instance groups, must be set to 1."
fn spec.initProvider.placement.managedCluster.config.masterConfig.withPreemptibility
withPreemptibility(preemptibility)
"Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is NON_PREEMPTIBLE. This default cannot be changed. The default value for secondary instances is PREEMPTIBLE. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE"
obj spec.initProvider.placement.managedCluster.config.masterConfig.accelerators
"Optional. The Compute Engine accelerator configuration for these instances."
fn spec.initProvider.placement.managedCluster.config.masterConfig.accelerators.withAcceleratorCount
withAcceleratorCount(acceleratorCount)
"The number of the accelerator cards of this type exposed to this instance."
fn spec.initProvider.placement.managedCluster.config.masterConfig.accelerators.withAcceleratorType
withAcceleratorType(acceleratorType)
"Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-k80."
obj spec.initProvider.placement.managedCluster.config.masterConfig.diskConfig
"Optional. Disk option config settings."
fn spec.initProvider.placement.managedCluster.config.masterConfig.diskConfig.withBootDiskSizeGb
withBootDiskSizeGb(bootDiskSizeGb)
"Optional. Size in GB of the boot disk (default is 500GB)."
fn spec.initProvider.placement.managedCluster.config.masterConfig.diskConfig.withBootDiskType
withBootDiskType(bootDiskType)
"Optional. Type of the boot disk (default is \"pd-standard\"). Valid values: \"pd-ssd\" (Persistent Disk Solid State Drive) or \"pd-standard\" (Persistent Disk Hard Disk Drive)."
fn spec.initProvider.placement.managedCluster.config.masterConfig.diskConfig.withNumLocalSsds
withNumLocalSsds(numLocalSsds)
"Optional. Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries."
obj spec.initProvider.placement.managedCluster.config.secondaryWorkerConfig
"Optional. The Compute Engine config settings for additional worker instances in a cluster."
fn spec.initProvider.placement.managedCluster.config.secondaryWorkerConfig.withAccelerators
withAccelerators(accelerators)
"Optional. The Compute Engine accelerator configuration for these instances."
fn spec.initProvider.placement.managedCluster.config.secondaryWorkerConfig.withAcceleratorsMixin
withAcceleratorsMixin(accelerators)
"Optional. The Compute Engine accelerator configuration for these instances."
Note: This function appends passed data to existing values
fn spec.initProvider.placement.managedCluster.config.secondaryWorkerConfig.withDiskConfig
withDiskConfig(diskConfig)
"Optional. Disk option config settings."
fn spec.initProvider.placement.managedCluster.config.secondaryWorkerConfig.withDiskConfigMixin
withDiskConfigMixin(diskConfig)
"Optional. Disk option config settings."
Note: This function appends passed data to existing values
fn spec.initProvider.placement.managedCluster.config.secondaryWorkerConfig.withImage
withImage(image)
"Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * https://www.googleapis.com/compute/beta/projects/ If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default."
fn spec.initProvider.placement.managedCluster.config.secondaryWorkerConfig.withMachineType
withMachineType(machineType)
"Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * https://www.googleapis.com/compute/v1/projects/(https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, n1-standard-2`."
fn spec.initProvider.placement.managedCluster.config.secondaryWorkerConfig.withMinCpuPlatform
withMinCpuPlatform(minCpuPlatform)
"Optional. Specifies the minimum cpu platform for the Instance Group. See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu)."
fn spec.initProvider.placement.managedCluster.config.secondaryWorkerConfig.withNumInstances
withNumInstances(numInstances)
"Optional. The number of VM instances in the instance group. For master instance groups, must be set to 1."
fn spec.initProvider.placement.managedCluster.config.secondaryWorkerConfig.withPreemptibility
withPreemptibility(preemptibility)
"Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is NON_PREEMPTIBLE. This default cannot be changed. The default value for secondary instances is PREEMPTIBLE. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE"
obj spec.initProvider.placement.managedCluster.config.secondaryWorkerConfig.accelerators
"Optional. The Compute Engine accelerator configuration for these instances."
fn spec.initProvider.placement.managedCluster.config.secondaryWorkerConfig.accelerators.withAcceleratorCount
withAcceleratorCount(acceleratorCount)
"The number of the accelerator cards of this type exposed to this instance."
fn spec.initProvider.placement.managedCluster.config.secondaryWorkerConfig.accelerators.withAcceleratorType
withAcceleratorType(acceleratorType)
"Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-k80."
obj spec.initProvider.placement.managedCluster.config.secondaryWorkerConfig.diskConfig
"Optional. Disk option config settings."
fn spec.initProvider.placement.managedCluster.config.secondaryWorkerConfig.diskConfig.withBootDiskSizeGb
withBootDiskSizeGb(bootDiskSizeGb)
"Optional. Size in GB of the boot disk (default is 500GB)."
fn spec.initProvider.placement.managedCluster.config.secondaryWorkerConfig.diskConfig.withBootDiskType
withBootDiskType(bootDiskType)
"Optional. Type of the boot disk (default is \"pd-standard\"). Valid values: \"pd-ssd\" (Persistent Disk Solid State Drive) or \"pd-standard\" (Persistent Disk Hard Disk Drive)."
fn spec.initProvider.placement.managedCluster.config.secondaryWorkerConfig.diskConfig.withNumLocalSsds
withNumLocalSsds(numLocalSsds)
"Optional. Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries."
obj spec.initProvider.placement.managedCluster.config.securityConfig
"Optional. Security settings for the cluster."
fn spec.initProvider.placement.managedCluster.config.securityConfig.withKerberosConfig
withKerberosConfig(kerberosConfig)
"Kerberos related configuration."
fn spec.initProvider.placement.managedCluster.config.securityConfig.withKerberosConfigMixin
withKerberosConfigMixin(kerberosConfig)
"Kerberos related configuration."
Note: This function appends passed data to existing values
obj spec.initProvider.placement.managedCluster.config.securityConfig.kerberosConfig
"Kerberos related configuration."
fn spec.initProvider.placement.managedCluster.config.securityConfig.kerberosConfig.withCrossRealmTrustAdminServer
withCrossRealmTrustAdminServer(crossRealmTrustAdminServer)
"Optional. The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship."
fn spec.initProvider.placement.managedCluster.config.securityConfig.kerberosConfig.withCrossRealmTrustKdc
withCrossRealmTrustKdc(crossRealmTrustKdc)
"Optional. The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship."
fn spec.initProvider.placement.managedCluster.config.securityConfig.kerberosConfig.withCrossRealmTrustRealm
withCrossRealmTrustRealm(crossRealmTrustRealm)
"Optional. The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust."
fn spec.initProvider.placement.managedCluster.config.securityConfig.kerberosConfig.withCrossRealmTrustSharedPassword
withCrossRealmTrustSharedPassword(crossRealmTrustSharedPassword)
"Optional. The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship."
fn spec.initProvider.placement.managedCluster.config.securityConfig.kerberosConfig.withEnableKerberos
withEnableKerberos(enableKerberos)
"Optional. Flag to indicate whether to Kerberize the cluster (default: false). Set this field to true to enable Kerberos on a cluster."
fn spec.initProvider.placement.managedCluster.config.securityConfig.kerberosConfig.withKdcDbKey
withKdcDbKey(kdcDbKey)
"Optional. The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database."
fn spec.initProvider.placement.managedCluster.config.securityConfig.kerberosConfig.withKeyPassword
withKeyPassword(keyPassword)
"Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc."
fn spec.initProvider.placement.managedCluster.config.securityConfig.kerberosConfig.withKeystore
withKeystore(keystore)
"Optional. The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate."
fn spec.initProvider.placement.managedCluster.config.securityConfig.kerberosConfig.withKeystorePassword
withKeystorePassword(keystorePassword)
"Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, this password is generated by Dataproc."
fn spec.initProvider.placement.managedCluster.config.securityConfig.kerberosConfig.withKmsKey
withKmsKey(kmsKey)
"Optional. The uri of the KMS key used to encrypt various sensitive files."
fn spec.initProvider.placement.managedCluster.config.securityConfig.kerberosConfig.withRealm
withRealm(realm)
"Optional. The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm."
fn spec.initProvider.placement.managedCluster.config.securityConfig.kerberosConfig.withRootPrincipalPassword
withRootPrincipalPassword(rootPrincipalPassword)
"Optional. The Cloud Storage URI of a KMS encrypted file containing the root principal password."
fn spec.initProvider.placement.managedCluster.config.securityConfig.kerberosConfig.withTgtLifetimeHours
withTgtLifetimeHours(tgtLifetimeHours)
"Optional. The lifetime of the ticket granting ticket, in hours. If not specified, or user specifies 0, then default value 10 will be used."
fn spec.initProvider.placement.managedCluster.config.securityConfig.kerberosConfig.withTruststore
withTruststore(truststore)
"Optional. The Cloud Storage URI of the truststore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate."
fn spec.initProvider.placement.managedCluster.config.securityConfig.kerberosConfig.withTruststorePassword
withTruststorePassword(truststorePassword)
"Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc."
obj spec.initProvider.placement.managedCluster.config.softwareConfig
"Optional. The config settings for software inside the cluster."
fn spec.initProvider.placement.managedCluster.config.softwareConfig.withImageVersion
withImageVersion(imageVersion)
"Optional. The version of software inside the cluster. It must be one of the supported (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). If unspecified, it defaults to the latest Debian version."
fn spec.initProvider.placement.managedCluster.config.softwareConfig.withOptionalComponents
withOptionalComponents(optionalComponents)
fn spec.initProvider.placement.managedCluster.config.softwareConfig.withOptionalComponentsMixin
withOptionalComponentsMixin(optionalComponents)
Note: This function appends passed data to existing values
fn spec.initProvider.placement.managedCluster.config.softwareConfig.withProperties
withProperties(properties)
"Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten."
fn spec.initProvider.placement.managedCluster.config.softwareConfig.withPropertiesMixin
withPropertiesMixin(properties)
"Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten."
Note: This function appends passed data to existing values
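A sketch of the softwareConfig setters: pin the image version, enable an optional component, and set one daemon property in prefix:property form. All literal values are illustrative placeholders and the import path is assumed:

    // Hypothetical import path; adjust to wherever this library is vendored.
    local workflowTemplate = (import 'provider-gcp/main.libsonnet').dataproc.v1beta1.workflowTemplate;
    local software = workflowTemplate.spec.initProvider.placement.managedCluster.config.softwareConfig;

    software.withImageVersion('2.1-debian11')
    + software.withOptionalComponents(['JUPYTER'])
    + software.withProperties({ 'spark:spark.executor.memory': '4g' })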
obj spec.initProvider.placement.managedCluster.config.workerConfig
"Optional. The Compute Engine config settings for additional worker instances in a cluster."
fn spec.initProvider.placement.managedCluster.config.workerConfig.withAccelerators
withAccelerators(accelerators)
"Optional. The Compute Engine accelerator configuration for these instances."
fn spec.initProvider.placement.managedCluster.config.workerConfig.withAcceleratorsMixin
withAcceleratorsMixin(accelerators)
"Optional. The Compute Engine accelerator configuration for these instances."
Note: This function appends passed data to existing values
fn spec.initProvider.placement.managedCluster.config.workerConfig.withDiskConfig
withDiskConfig(diskConfig)
"Optional. Disk option config settings."
fn spec.initProvider.placement.managedCluster.config.workerConfig.withDiskConfigMixin
withDiskConfigMixin(diskConfig)
"Optional. Disk option config settings."
Note: This function appends passed data to existing values
fn spec.initProvider.placement.managedCluster.config.workerConfig.withImage
withImage(image)
"Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * https://www.googleapis.com/compute/beta/projects/ If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default."
fn spec.initProvider.placement.managedCluster.config.workerConfig.withMachineType
withMachineType(machineType)
"Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * https://www.googleapis.com/compute/v1/projects/(https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, n1-standard-2`."
fn spec.initProvider.placement.managedCluster.config.workerConfig.withMinCpuPlatform
withMinCpuPlatform(minCpuPlatform)
"Optional. Specifies the minimum cpu platform for the Instance Group. See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu)."
fn spec.initProvider.placement.managedCluster.config.workerConfig.withNumInstances
withNumInstances(numInstances)
"Optional. The number of VM instances in the instance group. For master instance groups, must be set to 1."
fn spec.initProvider.placement.managedCluster.config.workerConfig.withPreemptibility
withPreemptibility(preemptibility)
"Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is NON_PREEMPTIBLE. This default cannot be changed. The default value for secondary instances is PREEMPTIBLE. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE"
obj spec.initProvider.placement.managedCluster.config.workerConfig.accelerators
"Optional. The Compute Engine accelerator configuration for these instances."
fn spec.initProvider.placement.managedCluster.config.workerConfig.accelerators.withAcceleratorCount
withAcceleratorCount(acceleratorCount)
"The number of the accelerator cards of this type exposed to this instance."
fn spec.initProvider.placement.managedCluster.config.workerConfig.accelerators.withAcceleratorType
withAcceleratorType(acceleratorType)
"Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-k80."
obj spec.initProvider.placement.managedCluster.config.workerConfig.diskConfig
"Optional. Disk option config settings."
fn spec.initProvider.placement.managedCluster.config.workerConfig.diskConfig.withBootDiskSizeGb
withBootDiskSizeGb(bootDiskSizeGb)
"Optional. Size in GB of the boot disk (default is 500GB)."
fn spec.initProvider.placement.managedCluster.config.workerConfig.diskConfig.withBootDiskType
withBootDiskType(bootDiskType)
"Optional. Type of the boot disk (default is \"pd-standard\"). Valid values: \"pd-ssd\" (Persistent Disk Solid State Drive) or \"pd-standard\" (Persistent Disk Hard Disk Drive)."
fn spec.initProvider.placement.managedCluster.config.workerConfig.diskConfig.withNumLocalSsds
withNumLocalSsds(numLocalSsds)
"Optional. Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries."
obj spec.providerConfigRef
"ProviderConfigReference specifies how the provider that will be used to create, observe, update, and delete this managed resource should be configured."
fn spec.providerConfigRef.withName
withName(name)
"Name of the referenced object."
obj spec.providerConfigRef.policy
"Policies for referencing."
fn spec.providerConfigRef.policy.withResolution
withResolution(resolution)
"Resolution specifies whether resolution of this reference is required. The default is 'Required', which means the reconcile will fail if the reference cannot be resolved. 'Optional' means this reference will be a no-op if it cannot be resolved."
fn spec.providerConfigRef.policy.withResolve
withResolve(resolve)
"Resolve specifies when this reference should be resolved. The default is 'IfNotPresent', which will attempt to resolve the reference only when the corresponding field is not present. Use 'Always' to resolve the reference on every reconcile."
obj spec.providerRef
"ProviderReference specifies the provider that will be used to create, observe, update, and delete this managed resource. Deprecated: Please use ProviderConfigReference, i.e. providerConfigRef
"
fn spec.providerRef.withName
withName(name)
"Name of the referenced object."
obj spec.providerRef.policy
"Policies for referencing."
fn spec.providerRef.policy.withResolution
withResolution(resolution)
"Resolution specifies whether resolution of this reference is required. The default is 'Required', which means the reconcile will fail if the reference cannot be resolved. 'Optional' means this reference will be a no-op if it cannot be resolved."
fn spec.providerRef.policy.withResolve
withResolve(resolve)
"Resolve specifies when this reference should be resolved. The default is 'IfNotPresent', which will attempt to resolve the reference only when the corresponding field is not present. Use 'Always' to resolve the reference on every reconcile."
obj spec.publishConnectionDetailsTo
"PublishConnectionDetailsTo specifies the connection secret config which contains a name, metadata and a reference to secret store config to which any connection details for this managed resource should be written. Connection details frequently include the endpoint, username, and password required to connect to the managed resource."
fn spec.publishConnectionDetailsTo.withName
withName(name)
"Name is the name of the connection secret."
obj spec.publishConnectionDetailsTo.configRef
"SecretStoreConfigRef specifies which secret store config should be used for this ConnectionSecret."
fn spec.publishConnectionDetailsTo.configRef.withName
withName(name)
"Name of the referenced object."
obj spec.publishConnectionDetailsTo.configRef.policy
"Policies for referencing."
fn spec.publishConnectionDetailsTo.configRef.policy.withResolution
withResolution(resolution)
"Resolution specifies whether resolution of this reference is required. The default is 'Required', which means the reconcile will fail if the reference cannot be resolved. 'Optional' means this reference will be a no-op if it cannot be resolved."
fn spec.publishConnectionDetailsTo.configRef.policy.withResolve
withResolve(resolve)
"Resolve specifies when this reference should be resolved. The default is 'IfNotPresent', which will attempt to resolve the reference only when the corresponding field is not present. Use 'Always' to resolve the reference on every reconcile."
obj spec.publishConnectionDetailsTo.metadata
"Metadata is the metadata for connection secret."
fn spec.publishConnectionDetailsTo.metadata.withAnnotations
withAnnotations(annotations)
"Annotations are the annotations to be added to connection secret. - For Kubernetes secrets, this will be used as \"metadata.annotations\". - It is up to Secret Store implementation for others store types."
fn spec.publishConnectionDetailsTo.metadata.withAnnotationsMixin
withAnnotationsMixin(annotations)
"Annotations are the annotations to be added to connection secret. - For Kubernetes secrets, this will be used as \"metadata.annotations\". - It is up to Secret Store implementation for others store types."
Note: This function appends passed data to existing values
fn spec.publishConnectionDetailsTo.metadata.withLabels
withLabels(labels)
"Labels are the labels/tags to be added to connection secret. - For Kubernetes secrets, this will be used as \"metadata.labels\". - It is up to Secret Store implementation for others store types."
fn spec.publishConnectionDetailsTo.metadata.withLabelsMixin
withLabelsMixin(labels)
"Labels are the labels/tags to be added to connection secret. - For Kubernetes secrets, this will be used as \"metadata.labels\". - It is up to Secret Store implementation for others store types."
Note: This function appends passed data to existing values
fn spec.publishConnectionDetailsTo.metadata.withType
withType(type)
"Type is the SecretType for the connection secret. - Only valid for Kubernetes Secret Stores."
obj spec.writeConnectionSecretToRef
"WriteConnectionSecretToReference specifies the namespace and name of a Secret to which any connection details for this managed resource should be written. Connection details frequently include the endpoint, username, and password required to connect to the managed resource. This field is planned to be replaced in a future release in favor of PublishConnectionDetailsTo. Currently, both could be set independently and connection details would be published to both without affecting each other."
fn spec.writeConnectionSecretToRef.withName
withName(name)
"Name of the secret."
fn spec.writeConnectionSecretToRef.withNamespace
withNamespace(namespace)
"Namespace of the secret."