dataproc.v1beta1.cluster
"Cluster is the Schema for the Clusters API. Manages a Cloud Dataproc cluster resource."
Index
fn new(name)
obj metadata
fn withAnnotations(annotations)
fn withAnnotationsMixin(annotations)
fn withClusterName(clusterName)
fn withCreationTimestamp(creationTimestamp)
fn withDeletionGracePeriodSeconds(deletionGracePeriodSeconds)
fn withDeletionTimestamp(deletionTimestamp)
fn withFinalizers(finalizers)
fn withFinalizersMixin(finalizers)
fn withGenerateName(generateName)
fn withGeneration(generation)
fn withLabels(labels)
fn withLabelsMixin(labels)
fn withName(name)
fn withNamespace(namespace)
fn withOwnerReferences(ownerReferences)
fn withOwnerReferencesMixin(ownerReferences)
fn withResourceVersion(resourceVersion)
fn withSelfLink(selfLink)
fn withUid(uid)
obj spec
fn withDeletionPolicy(deletionPolicy)
fn withManagementPolicies(managementPolicies)
fn withManagementPoliciesMixin(managementPolicies)
obj spec.forProvider
fn withClusterConfig(clusterConfig)
fn withClusterConfigMixin(clusterConfig)
fn withGracefulDecommissionTimeout(gracefulDecommissionTimeout)
fn withLabels(labels)
fn withLabelsMixin(labels)
fn withName(name)
fn withProject(project)
fn withRegion(region)
fn withVirtualClusterConfig(virtualClusterConfig)
fn withVirtualClusterConfigMixin(virtualClusterConfig)
obj spec.forProvider.clusterConfig
fn withAutoscalingConfig(autoscalingConfig)
fn withAutoscalingConfigMixin(autoscalingConfig)
fn withDataprocMetricConfig(dataprocMetricConfig)
fn withDataprocMetricConfigMixin(dataprocMetricConfig)
fn withEncryptionConfig(encryptionConfig)
fn withEncryptionConfigMixin(encryptionConfig)
fn withEndpointConfig(endpointConfig)
fn withEndpointConfigMixin(endpointConfig)
fn withGceClusterConfig(gceClusterConfig)
fn withGceClusterConfigMixin(gceClusterConfig)
fn withInitializationAction(initializationAction)
fn withInitializationActionMixin(initializationAction)
fn withLifecycleConfig(lifecycleConfig)
fn withLifecycleConfigMixin(lifecycleConfig)
fn withMasterConfig(masterConfig)
fn withMasterConfigMixin(masterConfig)
fn withMetastoreConfig(metastoreConfig)
fn withMetastoreConfigMixin(metastoreConfig)
fn withPreemptibleWorkerConfig(preemptibleWorkerConfig)
fn withPreemptibleWorkerConfigMixin(preemptibleWorkerConfig)
fn withSecurityConfig(securityConfig)
fn withSecurityConfigMixin(securityConfig)
fn withSoftwareConfig(softwareConfig)
fn withSoftwareConfigMixin(softwareConfig)
fn withStagingBucket(stagingBucket)
fn withTempBucket(tempBucket)
fn withWorkerConfig(workerConfig)
fn withWorkerConfigMixin(workerConfig)
obj spec.forProvider.clusterConfig.autoscalingConfig
obj spec.forProvider.clusterConfig.dataprocMetricConfig
obj spec.forProvider.clusterConfig.encryptionConfig
obj spec.forProvider.clusterConfig.endpointConfig
obj spec.forProvider.clusterConfig.gceClusterConfig
fn withInternalIpOnly(internalIpOnly)
fn withMetadata(metadata)
fn withMetadataMixin(metadata)
fn withNetwork(network)
fn withNodeGroupAffinity(nodeGroupAffinity)
fn withNodeGroupAffinityMixin(nodeGroupAffinity)
fn withReservationAffinity(reservationAffinity)
fn withReservationAffinityMixin(reservationAffinity)
fn withServiceAccount(serviceAccount)
fn withServiceAccountScopes(serviceAccountScopes)
fn withServiceAccountScopesMixin(serviceAccountScopes)
fn withShieldedInstanceConfig(shieldedInstanceConfig)
fn withShieldedInstanceConfigMixin(shieldedInstanceConfig)
fn withSubnetwork(subnetwork)
fn withTags(tags)
fn withTagsMixin(tags)
fn withZone(zone)
obj spec.forProvider.clusterConfig.gceClusterConfig.nodeGroupAffinity
obj spec.forProvider.clusterConfig.gceClusterConfig.reservationAffinity
obj spec.forProvider.clusterConfig.gceClusterConfig.serviceAccountRef
obj spec.forProvider.clusterConfig.gceClusterConfig.serviceAccountSelector
obj spec.forProvider.clusterConfig.gceClusterConfig.shieldedInstanceConfig
obj spec.forProvider.clusterConfig.initializationAction
obj spec.forProvider.clusterConfig.lifecycleConfig
obj spec.forProvider.clusterConfig.masterConfig
fn withAccelerators(accelerators)
fn withAcceleratorsMixin(accelerators)
fn withDiskConfig(diskConfig)
fn withDiskConfigMixin(diskConfig)
fn withImageUri(imageUri)
fn withMachineType(machineType)
fn withMinCpuPlatform(minCpuPlatform)
fn withNumInstances(numInstances)
obj spec.forProvider.clusterConfig.masterConfig.accelerators
obj spec.forProvider.clusterConfig.masterConfig.diskConfig
obj spec.forProvider.clusterConfig.metastoreConfig
obj spec.forProvider.clusterConfig.preemptibleWorkerConfig
obj spec.forProvider.clusterConfig.securityConfig
fn withKerberosConfig(kerberosConfig)
fn withKerberosConfigMixin(kerberosConfig)
obj spec.forProvider.clusterConfig.securityConfig.kerberosConfig
fn withCrossRealmTrustAdminServer(crossRealmTrustAdminServer)
fn withCrossRealmTrustKdc(crossRealmTrustKdc)
fn withCrossRealmTrustRealm(crossRealmTrustRealm)
fn withCrossRealmTrustSharedPasswordUri(crossRealmTrustSharedPasswordUri)
fn withEnableKerberos(enableKerberos)
fn withKdcDbKeyUri(kdcDbKeyUri)
fn withKeyPasswordUri(keyPasswordUri)
fn withKeystorePasswordUri(keystorePasswordUri)
fn withKeystoreUri(keystoreUri)
fn withKmsKeyUri(kmsKeyUri)
fn withRealm(realm)
fn withRootPrincipalPasswordUri(rootPrincipalPasswordUri)
fn withTgtLifetimeHours(tgtLifetimeHours)
fn withTruststorePasswordUri(truststorePasswordUri)
fn withTruststoreUri(truststoreUri)
obj spec.forProvider.clusterConfig.softwareConfig
obj spec.forProvider.clusterConfig.workerConfig
fn withAccelerators(accelerators)
fn withAcceleratorsMixin(accelerators)
fn withDiskConfig(diskConfig)
fn withDiskConfigMixin(diskConfig)
fn withImageUri(imageUri)
fn withMachineType(machineType)
fn withMinCpuPlatform(minCpuPlatform)
fn withNumInstances(numInstances)
obj spec.forProvider.clusterConfig.workerConfig.accelerators
obj spec.forProvider.clusterConfig.workerConfig.diskConfig
obj spec.forProvider.virtualClusterConfig
fn withAuxiliaryServicesConfig(auxiliaryServicesConfig)
fn withAuxiliaryServicesConfigMixin(auxiliaryServicesConfig)
fn withKubernetesClusterConfig(kubernetesClusterConfig)
fn withKubernetesClusterConfigMixin(kubernetesClusterConfig)
fn withStagingBucket(stagingBucket)
obj spec.forProvider.virtualClusterConfig.auxiliaryServicesConfig
fn withMetastoreConfig(metastoreConfig)
fn withMetastoreConfigMixin(metastoreConfig)
fn withSparkHistoryServerConfig(sparkHistoryServerConfig)
fn withSparkHistoryServerConfigMixin(sparkHistoryServerConfig)
obj spec.forProvider.virtualClusterConfig.auxiliaryServicesConfig.metastoreConfig
obj spec.forProvider.virtualClusterConfig.auxiliaryServicesConfig.sparkHistoryServerConfig
obj spec.forProvider.virtualClusterConfig.kubernetesClusterConfig
fn withGkeClusterConfig(gkeClusterConfig)
fn withGkeClusterConfigMixin(gkeClusterConfig)
fn withKubernetesNamespace(kubernetesNamespace)
fn withKubernetesSoftwareConfig(kubernetesSoftwareConfig)
fn withKubernetesSoftwareConfigMixin(kubernetesSoftwareConfig)
obj spec.forProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig
fn withGkeClusterTarget(gkeClusterTarget)
fn withNodePoolTarget(nodePoolTarget)
fn withNodePoolTargetMixin(nodePoolTarget)
obj spec.forProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.nodePoolTarget
fn withNodePool(nodePool)
fn withNodePoolConfig(nodePoolConfig)
fn withNodePoolConfigMixin(nodePoolConfig)
fn withRoles(roles)
fn withRolesMixin(roles)
obj spec.forProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.nodePoolTarget.nodePoolConfig
fn withAutoscaling(autoscaling)
fn withAutoscalingMixin(autoscaling)
fn withConfig(config)
fn withConfigMixin(config)
fn withLocations(locations)
fn withLocationsMixin(locations)
obj spec.forProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.nodePoolTarget.nodePoolConfig.autoscaling
obj spec.forProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.nodePoolTarget.nodePoolConfig.config
obj spec.forProvider.virtualClusterConfig.kubernetesClusterConfig.kubernetesSoftwareConfig
obj spec.initProvider
fn withClusterConfig(clusterConfig)
fn withClusterConfigMixin(clusterConfig)
fn withGracefulDecommissionTimeout(gracefulDecommissionTimeout)
fn withLabels(labels)
fn withLabelsMixin(labels)
fn withName(name)
fn withProject(project)
fn withRegion(region)
fn withVirtualClusterConfig(virtualClusterConfig)
fn withVirtualClusterConfigMixin(virtualClusterConfig)
obj spec.initProvider.clusterConfig
fn withAutoscalingConfig(autoscalingConfig)
fn withAutoscalingConfigMixin(autoscalingConfig)
fn withDataprocMetricConfig(dataprocMetricConfig)
fn withDataprocMetricConfigMixin(dataprocMetricConfig)
fn withEncryptionConfig(encryptionConfig)
fn withEncryptionConfigMixin(encryptionConfig)
fn withEndpointConfig(endpointConfig)
fn withEndpointConfigMixin(endpointConfig)
fn withGceClusterConfig(gceClusterConfig)
fn withGceClusterConfigMixin(gceClusterConfig)
fn withInitializationAction(initializationAction)
fn withInitializationActionMixin(initializationAction)
fn withLifecycleConfig(lifecycleConfig)
fn withLifecycleConfigMixin(lifecycleConfig)
fn withMasterConfig(masterConfig)
fn withMasterConfigMixin(masterConfig)
fn withMetastoreConfig(metastoreConfig)
fn withMetastoreConfigMixin(metastoreConfig)
fn withPreemptibleWorkerConfig(preemptibleWorkerConfig)
fn withPreemptibleWorkerConfigMixin(preemptibleWorkerConfig)
fn withSecurityConfig(securityConfig)
fn withSecurityConfigMixin(securityConfig)
fn withSoftwareConfig(softwareConfig)
fn withSoftwareConfigMixin(softwareConfig)
fn withStagingBucket(stagingBucket)
fn withTempBucket(tempBucket)
fn withWorkerConfig(workerConfig)
fn withWorkerConfigMixin(workerConfig)
obj spec.initProvider.clusterConfig.autoscalingConfig
obj spec.initProvider.clusterConfig.dataprocMetricConfig
obj spec.initProvider.clusterConfig.encryptionConfig
obj spec.initProvider.clusterConfig.endpointConfig
obj spec.initProvider.clusterConfig.gceClusterConfig
fn withInternalIpOnly(internalIpOnly)
fn withMetadata(metadata)
fn withMetadataMixin(metadata)
fn withNetwork(network)
fn withNodeGroupAffinity(nodeGroupAffinity)
fn withNodeGroupAffinityMixin(nodeGroupAffinity)
fn withReservationAffinity(reservationAffinity)
fn withReservationAffinityMixin(reservationAffinity)
fn withServiceAccountScopes(serviceAccountScopes)
fn withServiceAccountScopesMixin(serviceAccountScopes)
fn withShieldedInstanceConfig(shieldedInstanceConfig)
fn withShieldedInstanceConfigMixin(shieldedInstanceConfig)
fn withSubnetwork(subnetwork)
fn withTags(tags)
fn withTagsMixin(tags)
fn withZone(zone)
obj spec.initProvider.clusterConfig.gceClusterConfig.nodeGroupAffinity
obj spec.initProvider.clusterConfig.gceClusterConfig.reservationAffinity
obj spec.initProvider.clusterConfig.gceClusterConfig.shieldedInstanceConfig
obj spec.initProvider.clusterConfig.initializationAction
obj spec.initProvider.clusterConfig.lifecycleConfig
obj spec.initProvider.clusterConfig.masterConfig
fn withAccelerators(accelerators)
fn withAcceleratorsMixin(accelerators)
fn withDiskConfig(diskConfig)
fn withDiskConfigMixin(diskConfig)
fn withImageUri(imageUri)
fn withMachineType(machineType)
fn withMinCpuPlatform(minCpuPlatform)
fn withNumInstances(numInstances)
obj spec.initProvider.clusterConfig.masterConfig.accelerators
obj spec.initProvider.clusterConfig.masterConfig.diskConfig
obj spec.initProvider.clusterConfig.metastoreConfig
obj spec.initProvider.clusterConfig.preemptibleWorkerConfig
obj spec.initProvider.clusterConfig.securityConfig
fn withKerberosConfig(kerberosConfig)
fn withKerberosConfigMixin(kerberosConfig)
obj spec.initProvider.clusterConfig.securityConfig.kerberosConfig
fn withCrossRealmTrustAdminServer(crossRealmTrustAdminServer)
fn withCrossRealmTrustKdc(crossRealmTrustKdc)
fn withCrossRealmTrustRealm(crossRealmTrustRealm)
fn withCrossRealmTrustSharedPasswordUri(crossRealmTrustSharedPasswordUri)
fn withEnableKerberos(enableKerberos)
fn withKdcDbKeyUri(kdcDbKeyUri)
fn withKeyPasswordUri(keyPasswordUri)
fn withKeystorePasswordUri(keystorePasswordUri)
fn withKeystoreUri(keystoreUri)
fn withKmsKeyUri(kmsKeyUri)
fn withRealm(realm)
fn withRootPrincipalPasswordUri(rootPrincipalPasswordUri)
fn withTgtLifetimeHours(tgtLifetimeHours)
fn withTruststorePasswordUri(truststorePasswordUri)
fn withTruststoreUri(truststoreUri)
obj spec.initProvider.clusterConfig.softwareConfig
obj spec.initProvider.clusterConfig.workerConfig
fn withAccelerators(accelerators)
fn withAcceleratorsMixin(accelerators)
fn withDiskConfig(diskConfig)
fn withDiskConfigMixin(diskConfig)
fn withImageUri(imageUri)
fn withMachineType(machineType)
fn withMinCpuPlatform(minCpuPlatform)
fn withNumInstances(numInstances)
obj spec.initProvider.clusterConfig.workerConfig.accelerators
obj spec.initProvider.clusterConfig.workerConfig.diskConfig
obj spec.initProvider.virtualClusterConfig
fn withAuxiliaryServicesConfig(auxiliaryServicesConfig)
fn withAuxiliaryServicesConfigMixin(auxiliaryServicesConfig)
fn withKubernetesClusterConfig(kubernetesClusterConfig)
fn withKubernetesClusterConfigMixin(kubernetesClusterConfig)
fn withStagingBucket(stagingBucket)
obj spec.initProvider.virtualClusterConfig.auxiliaryServicesConfig
fn withMetastoreConfig(metastoreConfig)
fn withMetastoreConfigMixin(metastoreConfig)
fn withSparkHistoryServerConfig(sparkHistoryServerConfig)
fn withSparkHistoryServerConfigMixin(sparkHistoryServerConfig)
obj spec.initProvider.virtualClusterConfig.auxiliaryServicesConfig.metastoreConfig
obj spec.initProvider.virtualClusterConfig.auxiliaryServicesConfig.sparkHistoryServerConfig
obj spec.initProvider.virtualClusterConfig.kubernetesClusterConfig
fn withGkeClusterConfig(gkeClusterConfig)
fn withGkeClusterConfigMixin(gkeClusterConfig)
fn withKubernetesNamespace(kubernetesNamespace)
fn withKubernetesSoftwareConfig(kubernetesSoftwareConfig)
fn withKubernetesSoftwareConfigMixin(kubernetesSoftwareConfig)
obj spec.initProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig
fn withGkeClusterTarget(gkeClusterTarget)
fn withNodePoolTarget(nodePoolTarget)
fn withNodePoolTargetMixin(nodePoolTarget)
obj spec.initProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.nodePoolTarget
fn withNodePool(nodePool)
fn withNodePoolConfig(nodePoolConfig)
fn withNodePoolConfigMixin(nodePoolConfig)
fn withRoles(roles)
fn withRolesMixin(roles)
obj spec.initProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.nodePoolTarget.nodePoolConfig
fn withAutoscaling(autoscaling)
fn withAutoscalingMixin(autoscaling)
fn withConfig(config)
fn withConfigMixin(config)
fn withLocations(locations)
fn withLocationsMixin(locations)
obj spec.initProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.nodePoolTarget.nodePoolConfig.autoscaling
obj spec.initProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.nodePoolTarget.nodePoolConfig.config
obj spec.initProvider.virtualClusterConfig.kubernetesClusterConfig.kubernetesSoftwareConfig
obj spec.providerConfigRef
obj spec.providerRef
obj spec.publishConnectionDetailsTo
obj spec.writeConnectionSecretToRef
Fields
fn new
new(name)
new returns an instance of Cluster
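A minimal usage sketch. The import path is an assumption (it depends on how the library is vendored), and the cluster name, project ID, and region are placeholders:

```jsonnet
// Sketch: adjust the import path to wherever this library is vendored.
local gcp = import 'github.com/jsonnet-libs/crossplane-provider-gcp/main.libsonnet';
local cluster = gcp.dataproc.v1beta1.cluster;

cluster.new('example-cluster')
+ cluster.spec.forProvider.withRegion('us-central1')
+ cluster.spec.forProvider.withProject('my-project')  // placeholder project ID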
obj metadata
"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."
fn metadata.withAnnotations
withAnnotations(annotations)
"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"
fn metadata.withAnnotationsMixin
withAnnotationsMixin(annotations)
"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"
Note: This function appends passed data to existing values
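To illustrate the difference between the plain and Mixin variants, a sketch (same assumed import path as in the `new` example): `withAnnotations` sets the map wholesale, while `withAnnotationsMixin` merges into whatever is already set.

```jsonnet
local gcp = import 'github.com/jsonnet-libs/crossplane-provider-gcp/main.libsonnet';  // assumed path
local cluster = gcp.dataproc.v1beta1.cluster;

cluster.new('example-cluster')
+ cluster.metadata.withAnnotations({ owner: 'data-team' })
// Merges with the annotations above instead of replacing them:
+ cluster.metadata.withAnnotationsMixin({ 'cost-center': 'analytics' })
// resulting annotations: { owner: 'data-team', 'cost-center': 'analytics' }
```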
fn metadata.withClusterName
withClusterName(clusterName)
"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."
fn metadata.withCreationTimestamp
withCreationTimestamp(creationTimestamp)
"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."
fn metadata.withDeletionGracePeriodSeconds
withDeletionGracePeriodSeconds(deletionGracePeriodSeconds)
"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."
fn metadata.withDeletionTimestamp
withDeletionTimestamp(deletionTimestamp)
"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."
fn metadata.withFinalizers
withFinalizers(finalizers)
"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."
fn metadata.withFinalizersMixin
withFinalizersMixin(finalizers)
"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."
Note: This function appends passed data to existing values
fn metadata.withGenerateName
withGenerateName(generateName)
"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"
fn metadata.withGeneration
withGeneration(generation)
"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."
fn metadata.withLabels
withLabels(labels)
"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"
fn metadata.withLabelsMixin
withLabelsMixin(labels)
"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"
Note: This function appends passed data to existing values
fn metadata.withName
withName(name)
"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"
fn metadata.withNamespace
withNamespace(namespace)
"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"
fn metadata.withOwnerReferences
withOwnerReferences(ownerReferences)
"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."
fn metadata.withOwnerReferencesMixin
withOwnerReferencesMixin(ownerReferences)
"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."
Note: This function appends passed data to existing values
fn metadata.withResourceVersion
withResourceVersion(resourceVersion)
"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"
fn metadata.withSelfLink
withSelfLink(selfLink)
"SelfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."
fn metadata.withUid
withUid(uid)
"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"
obj spec
"ClusterSpec defines the desired state of Cluster"
fn spec.withDeletionPolicy
withDeletionPolicy(deletionPolicy)
"DeletionPolicy specifies what will happen to the underlying external when this managed resource is deleted - either \"Delete\" or \"Orphan\" the external resource. This field is planned to be deprecated in favor of the ManagementPolicies field in a future release. Currently, both could be set independently and non-default values would be honored if the feature flag is enabled. See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223"
fn spec.withManagementPolicies
withManagementPolicies(managementPolicies)
"THIS IS AN ALPHA FIELD. Do not use it in production. It is not honored unless the relevant Crossplane feature flag is enabled, and may be changed or removed without notice. ManagementPolicies specify the array of actions Crossplane is allowed to take on the managed and external resources. This field is planned to replace the DeletionPolicy field in a future release. Currently, both could be set independently and non-default values would be honored if the feature flag is enabled. If both are custom, the DeletionPolicy field will be ignored. See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md"
fn spec.withManagementPoliciesMixin
withManagementPoliciesMixin(managementPolicies)
"THIS IS AN ALPHA FIELD. Do not use it in production. It is not honored unless the relevant Crossplane feature flag is enabled, and may be changed or removed without notice. ManagementPolicies specify the array of actions Crossplane is allowed to take on the managed and external resources. This field is planned to replace the DeletionPolicy field in a future release. Currently, both could be set independently and non-default values would be honored if the feature flag is enabled. If both are custom, the DeletionPolicy field will be ignored. See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md"
Note: This function appends passed data to existing values
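For example, to keep the external Dataproc cluster around when the Kubernetes object is deleted, a sketch using the "Orphan" value described above:

```jsonnet
local gcp = import 'github.com/jsonnet-libs/crossplane-provider-gcp/main.libsonnet';  // assumed path
local cluster = gcp.dataproc.v1beta1.cluster;

cluster.new('example-cluster')
+ cluster.spec.withDeletionPolicy('Orphan')
```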
obj spec.forProvider
fn spec.forProvider.withClusterConfig
withClusterConfig(clusterConfig)
"Allows you to configure various aspects of the cluster. Structure defined below."
fn spec.forProvider.withClusterConfigMixin
withClusterConfigMixin(clusterConfig)
"Allows you to configure various aspects of the cluster. Structure defined below."
Note: This function appends passed data to existing values
fn spec.forProvider.withGracefulDecommissionTimeout
withGracefulDecommissionTimeout(gracefulDecommissionTimeout)
"Does not affect auto scaling decomissioning from an autoscaling policy. Graceful decommissioning allows removing nodes from the cluster without interrupting jobs in progress. Timeout specifies how long to wait for jobs in progress to finish before forcefully removing nodes (and potentially interrupting jobs). Default timeout is 0 (for forceful decommission), and the maximum allowed timeout is 1 day. (see JSON representation of Duration). Only supported on Dataproc image versions 1.2 and higher. For more context see the docs"
fn spec.forProvider.withLabels
withLabels(labels)
"The list of labels (key/value pairs) to be applied to instances in the cluster. GCP generates some itself including goog-dataproc-cluster-name which is the name of the cluster."
fn spec.forProvider.withLabelsMixin
withLabelsMixin(labels)
"The list of labels (key/value pairs) to be applied to instances in the cluster. GCP generates some itself including goog-dataproc-cluster-name which is the name of the cluster."
Note: This function appends passed data to existing values
fn spec.forProvider.withName
withName(name)
"The name of the cluster, unique within the project and zone."
fn spec.forProvider.withProject
withProject(project)
"The ID of the project in which the cluster will exist. If it is not provided, the provider project is used."
fn spec.forProvider.withRegion
withRegion(region)
"The region in which the cluster and associated nodes will be created in. Defaults to global."
fn spec.forProvider.withVirtualClusterConfig
withVirtualClusterConfig(virtualClusterConfig)
"Allows you to configure a virtual Dataproc on GKE cluster. Structure defined below."
fn spec.forProvider.withVirtualClusterConfigMixin
withVirtualClusterConfigMixin(virtualClusterConfig)
"Allows you to configure a virtual Dataproc on GKE cluster. Structure defined below."
Note: This function appends passed data to existing values
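A sketch combining the scalar spec.forProvider setters above (all values are placeholders; the decommission timeout uses the JSON Duration string format):

```jsonnet
local gcp = import 'github.com/jsonnet-libs/crossplane-provider-gcp/main.libsonnet';  // assumed path
local cluster = gcp.dataproc.v1beta1.cluster;

cluster.new('example-cluster')
+ cluster.spec.forProvider.withRegion('us-central1')
+ cluster.spec.forProvider.withGracefulDecommissionTimeout('120s')
+ cluster.spec.forProvider.withLabels({ env: 'dev' })
```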
obj spec.forProvider.clusterConfig
"Allows you to configure various aspects of the cluster. Structure defined below."
fn spec.forProvider.clusterConfig.withAutoscalingConfig
withAutoscalingConfig(autoscalingConfig)
"The autoscaling policy config associated with the cluster. Note that once set, if autoscaling_config is the only field set in cluster_config, it can only be removed by setting policy_uri = \"\", rather than removing the whole block. Structure defined below."
fn spec.forProvider.clusterConfig.withAutoscalingConfigMixin
withAutoscalingConfigMixin(autoscalingConfig)
"The autoscaling policy config associated with the cluster. Note that once set, if autoscaling_config is the only field set in cluster_config, it can only be removed by setting policy_uri = \"\", rather than removing the whole block. Structure defined below."
Note: This function appends passed data to existing values
fn spec.forProvider.clusterConfig.withDataprocMetricConfig
withDataprocMetricConfig(dataprocMetricConfig)
"The Compute Engine accelerator (GPU) configuration for these instances. Can be specified multiple times. Structure defined below."
fn spec.forProvider.clusterConfig.withDataprocMetricConfigMixin
withDataprocMetricConfigMixin(dataprocMetricConfig)
"The Compute Engine accelerator (GPU) configuration for these instances. Can be specified multiple times. Structure defined below."
Note: This function appends passed data to existing values
fn spec.forProvider.clusterConfig.withEncryptionConfig
withEncryptionConfig(encryptionConfig)
"The Customer managed encryption keys settings for the cluster. Structure defined below."
fn spec.forProvider.clusterConfig.withEncryptionConfigMixin
withEncryptionConfigMixin(encryptionConfig)
"The Customer managed encryption keys settings for the cluster. Structure defined below."
Note: This function appends passed data to existing values
fn spec.forProvider.clusterConfig.withEndpointConfig
withEndpointConfig(endpointConfig)
"The config settings for port access on the cluster. Structure defined below."
fn spec.forProvider.clusterConfig.withEndpointConfigMixin
withEndpointConfigMixin(endpointConfig)
"The config settings for port access on the cluster. Structure defined below."
Note: This function appends passed data to existing values
fn spec.forProvider.clusterConfig.withGceClusterConfig
withGceClusterConfig(gceClusterConfig)
"Common config settings for resources of Google Compute Engine cluster instances, applicable to all instances in the cluster. Structure defined below."
fn spec.forProvider.clusterConfig.withGceClusterConfigMixin
withGceClusterConfigMixin(gceClusterConfig)
"Common config settings for resources of Google Compute Engine cluster instances, applicable to all instances in the cluster. Structure defined below."
Note: This function appends passed data to existing values
fn spec.forProvider.clusterConfig.withInitializationAction
withInitializationAction(initializationAction)
"Commands to execute on each node after config is completed. You can specify multiple versions of these. Structure defined below."
fn spec.forProvider.clusterConfig.withInitializationActionMixin
withInitializationActionMixin(initializationAction)
"Commands to execute on each node after config is completed. You can specify multiple versions of these. Structure defined below."
Note: This function appends passed data to existing values
fn spec.forProvider.clusterConfig.withLifecycleConfig
withLifecycleConfig(lifecycleConfig)
"The settings for auto deletion cluster schedule. Structure defined below."
fn spec.forProvider.clusterConfig.withLifecycleConfigMixin
withLifecycleConfigMixin(lifecycleConfig)
"The settings for auto deletion cluster schedule. Structure defined below."
Note: This function appends passed data to existing values
fn spec.forProvider.clusterConfig.withMasterConfig
withMasterConfig(masterConfig)
"The Google Compute Engine config settings for the master instances in a cluster. Structure defined below."
fn spec.forProvider.clusterConfig.withMasterConfigMixin
withMasterConfigMixin(masterConfig)
"The Google Compute Engine config settings for the master instances in a cluster. Structure defined below."
Note: This function appends passed data to existing values
fn spec.forProvider.clusterConfig.withMetastoreConfig
withMetastoreConfig(metastoreConfig)
"The config setting for metastore service with the cluster. Structure defined below."
fn spec.forProvider.clusterConfig.withMetastoreConfigMixin
withMetastoreConfigMixin(metastoreConfig)
"The config setting for metastore service with the cluster. Structure defined below."
Note: This function appends passed data to existing values
fn spec.forProvider.clusterConfig.withPreemptibleWorkerConfig
withPreemptibleWorkerConfig(preemptibleWorkerConfig)
"The Google Compute Engine config settings for the additional instances in a cluster. Structure defined below."
fn spec.forProvider.clusterConfig.withPreemptibleWorkerConfigMixin
withPreemptibleWorkerConfigMixin(preemptibleWorkerConfig)
"The Google Compute Engine config settings for the additional instances in a cluster. Structure defined below."
Note: This function appends passed data to existing values
fn spec.forProvider.clusterConfig.withSecurityConfig
withSecurityConfig(securityConfig)
"Security related configuration. Structure defined below."
fn spec.forProvider.clusterConfig.withSecurityConfigMixin
withSecurityConfigMixin(securityConfig)
"Security related configuration. Structure defined below."
Note: This function appends passed data to existing values
fn spec.forProvider.clusterConfig.withSoftwareConfig
withSoftwareConfig(softwareConfig)
"The config settings for software inside the cluster. Structure defined below."
fn spec.forProvider.clusterConfig.withSoftwareConfigMixin
withSoftwareConfigMixin(softwareConfig)
"The config settings for software inside the cluster. Structure defined below."
Note: This function appends passed data to existing values
fn spec.forProvider.clusterConfig.withStagingBucket
withStagingBucket(stagingBucket)
"The Cloud Storage staging bucket used to stage files, such as Hadoop jars, between client machines and the cluster. Note: If you don't explicitly specify a staging_bucket then GCP will auto create / assign one for you. However, you are not guaranteed an auto generated bucket which is solely dedicated to your cluster; it may be shared with other clusters in the same region/zone also choosing to use the auto generation option."
fn spec.forProvider.clusterConfig.withTempBucket
withTempBucket(tempBucket)
"The Cloud Storage temp bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. Note: If you don't explicitly specify a temp_bucket then GCP will auto create / assign one for you."
fn spec.forProvider.clusterConfig.withWorkerConfig
withWorkerConfig(workerConfig)
"The Google Compute Engine config settings for the worker instances in a cluster. Structure defined below."
fn spec.forProvider.clusterConfig.withWorkerConfigMixin
withWorkerConfigMixin(workerConfig)
"The Google Compute Engine config settings for the worker instances in a cluster. Structure defined below."
Note: This function appends passed data to existing values
obj spec.forProvider.clusterConfig.autoscalingConfig
"The autoscaling policy config associated with the cluster. Note that once set, if autoscaling_config is the only field set in cluster_config, it can only be removed by setting policy_uri = \"\", rather than removing the whole block. Structure defined below."
fn spec.forProvider.clusterConfig.autoscalingConfig.withPolicyUri
withPolicyUri(policyUri)
"The autoscaling policy used by the cluster."
obj spec.forProvider.clusterConfig.dataprocMetricConfig
"The Compute Engine accelerator (GPU) configuration for these instances. Can be specified multiple times. Structure defined below."
fn spec.forProvider.clusterConfig.dataprocMetricConfig.withMetrics
withMetrics(metrics)
"Metrics sources to enable."
fn spec.forProvider.clusterConfig.dataprocMetricConfig.withMetricsMixin
withMetricsMixin(metrics)
"Metrics sources to enable."
Note: This function appends passed data to existing values
obj spec.forProvider.clusterConfig.dataprocMetricConfig.metrics
"Metrics sources to enable."
fn spec.forProvider.clusterConfig.dataprocMetricConfig.metrics.withMetricOverrides
withMetricOverrides(metricOverrides)
"One or more [available OSS metrics] (https://cloud.google.com/dataproc/docs/guides/monitoring#available_oss_metrics) to collect for the metric course."
fn spec.forProvider.clusterConfig.dataprocMetricConfig.metrics.withMetricOverridesMixin
withMetricOverridesMixin(metricOverrides)
"One or more [available OSS metrics] (https://cloud.google.com/dataproc/docs/guides/monitoring#available_oss_metrics) to collect for the metric course."
Note: This function appends passed data to existing values
fn spec.forProvider.clusterConfig.dataprocMetricConfig.metrics.withMetricSource
withMetricSource(metricSource)
"A source for the collection of Dataproc OSS metrics (see available OSS metrics)."
obj spec.forProvider.clusterConfig.encryptionConfig
"The Customer managed encryption keys settings for the cluster. Structure defined below."
fn spec.forProvider.clusterConfig.encryptionConfig.withKmsKeyName
withKmsKeyName(kmsKeyName)
"The Cloud KMS key name to use for PD disk encryption for all instances in the cluster."
obj spec.forProvider.clusterConfig.endpointConfig
"The config settings for port access on the cluster. Structure defined below."
fn spec.forProvider.clusterConfig.endpointConfig.withEnableHttpPortAccess
withEnableHttpPortAccess(enableHttpPortAccess)
"The flag to enable http access to specific ports on the cluster from external sources (aka Component Gateway). Defaults to false."
obj spec.forProvider.clusterConfig.gceClusterConfig
"Common config settings for resources of Google Compute Engine cluster instances, applicable to all instances in the cluster. Structure defined below."
fn spec.forProvider.clusterConfig.gceClusterConfig.withInternalIpOnly
withInternalIpOnly(internalIpOnly)
"By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. If set to true, all instances in the cluster will only have internal IP addresses. Note: Private Google Access (also known as privateIpGoogleAccess) must be enabled on the subnetwork that the cluster will be launched in."
fn spec.forProvider.clusterConfig.gceClusterConfig.withMetadata
withMetadata(metadata)
"A map of the Compute Engine metadata entries to add to all instances (see Project and instance metadata)."
fn spec.forProvider.clusterConfig.gceClusterConfig.withMetadataMixin
withMetadataMixin(metadata)
"A map of the Compute Engine metadata entries to add to all instances (see Project and instance metadata)."
Note: This function appends passed data to existing values
fn spec.forProvider.clusterConfig.gceClusterConfig.withNetwork
withNetwork(network)
"The name or self_link of the Google Compute Engine network to the cluster will be part of. Conflicts with subnetwork. If neither is specified, this defaults to the \"default\" network."
fn spec.forProvider.clusterConfig.gceClusterConfig.withNodeGroupAffinity
withNodeGroupAffinity(nodeGroupAffinity)
"Node Group Affinity for sole-tenant clusters."
fn spec.forProvider.clusterConfig.gceClusterConfig.withNodeGroupAffinityMixin
withNodeGroupAffinityMixin(nodeGroupAffinity)
"Node Group Affinity for sole-tenant clusters."
Note: This function appends passed data to existing values
fn spec.forProvider.clusterConfig.gceClusterConfig.withReservationAffinity
withReservationAffinity(reservationAffinity)
"Reservation Affinity for consuming zonal reservation."
fn spec.forProvider.clusterConfig.gceClusterConfig.withReservationAffinityMixin
withReservationAffinityMixin(reservationAffinity)
"Reservation Affinity for consuming zonal reservation."
Note: This function appends passed data to existing values
fn spec.forProvider.clusterConfig.gceClusterConfig.withServiceAccount
withServiceAccount(serviceAccount)
"The service account to be used by the Node VMs. If not specified, the \"default\" service account is used."
fn spec.forProvider.clusterConfig.gceClusterConfig.withServiceAccountScopes
withServiceAccountScopes(serviceAccountScopes)
"The set of Google API scopes to be made available on all of the node VMs under the service_account specified. Both OAuth2 URLs and gcloud short names are supported. To allow full access to all Cloud APIs, use the cloud-platform scope. See a complete list of scopes here."
fn spec.forProvider.clusterConfig.gceClusterConfig.withServiceAccountScopesMixin
withServiceAccountScopesMixin(serviceAccountScopes)
"The set of Google API scopes to be made available on all of the node VMs under the service_account specified. Both OAuth2 URLs and gcloud short names are supported. To allow full access to all Cloud APIs, use the cloud-platform scope. See a complete list of scopes here."
Note: This function appends passed data to existing values
fn spec.forProvider.clusterConfig.gceClusterConfig.withShieldedInstanceConfig
withShieldedInstanceConfig(shieldedInstanceConfig)
"Shielded Instance Config for clusters using Compute Engine Shielded VMs."
fn spec.forProvider.clusterConfig.gceClusterConfig.withShieldedInstanceConfigMixin
withShieldedInstanceConfigMixin(shieldedInstanceConfig)
"Shielded Instance Config for clusters using Compute Engine Shielded VMs."
Note: This function appends passed data to existing values
fn spec.forProvider.clusterConfig.gceClusterConfig.withSubnetwork
withSubnetwork(subnetwork)
"The name or self_link of the Google Compute Engine subnetwork the cluster will be part of. Conflicts with network."
fn spec.forProvider.clusterConfig.gceClusterConfig.withTags
withTags(tags)
"The list of instance tags applied to instances in the cluster. Tags are used to identify valid sources or targets for network firewalls."
fn spec.forProvider.clusterConfig.gceClusterConfig.withTagsMixin
withTagsMixin(tags)
"The list of instance tags applied to instances in the cluster. Tags are used to identify valid sources or targets for network firewalls."
Note: This function appends passed data to existing values
fn spec.forProvider.clusterConfig.gceClusterConfig.withZone
withZone(zone)
"The GCP zone where your data is stored and used (i.e. where the master and the worker nodes will be created in). If region is set to 'global' (default) then zone is mandatory, otherwise GCP is able to make use of Auto Zone Placement to determine this automatically for you. Note: This setting additionally determines and restricts which computing resources are available for use with other configs such as cluster_config.master_config.machine_type and cluster_config.worker_config.machine_type."
obj spec.forProvider.clusterConfig.gceClusterConfig.nodeGroupAffinity
"Node Group Affinity for sole-tenant clusters."
fn spec.forProvider.clusterConfig.gceClusterConfig.nodeGroupAffinity.withNodeGroupUri
withNodeGroupUri(nodeGroupUri)
"The URI of a sole-tenant node group resource that the cluster will be created on."
obj spec.forProvider.clusterConfig.gceClusterConfig.reservationAffinity
"Reservation Affinity for consuming zonal reservation."
fn spec.forProvider.clusterConfig.gceClusterConfig.reservationAffinity.withConsumeReservationType
withConsumeReservationType(consumeReservationType)
"Corresponds to the type of reservation consumption."
fn spec.forProvider.clusterConfig.gceClusterConfig.reservationAffinity.withKey
withKey(key)
"Corresponds to the label key of reservation resource."
fn spec.forProvider.clusterConfig.gceClusterConfig.reservationAffinity.withValues
withValues(values)
"Corresponds to the label values of reservation resource."
fn spec.forProvider.clusterConfig.gceClusterConfig.reservationAffinity.withValuesMixin
withValuesMixin(values)
"Corresponds to the label values of reservation resource."
Note: This function appends passed data to existing values
obj spec.forProvider.clusterConfig.gceClusterConfig.serviceAccountRef
"Reference to a ServiceAccount in cloudplatform to populate serviceAccount."
fn spec.forProvider.clusterConfig.gceClusterConfig.serviceAccountRef.withName
withName(name)
"Name of the referenced object."
obj spec.forProvider.clusterConfig.gceClusterConfig.serviceAccountRef.policy
"Policies for referencing."
fn spec.forProvider.clusterConfig.gceClusterConfig.serviceAccountRef.policy.withResolution
withResolution(resolution)
"Resolution specifies whether resolution of this reference is required. The default is 'Required', which means the reconcile will fail if the reference cannot be resolved. 'Optional' means this reference will be a no-op if it cannot be resolved."
fn spec.forProvider.clusterConfig.gceClusterConfig.serviceAccountRef.policy.withResolve
withResolve(resolve)
"Resolve specifies when this reference should be resolved. The default is 'IfNotPresent', which will attempt to resolve the reference only when the corresponding field is not present. Use 'Always' to resolve the reference on every reconcile."
obj spec.forProvider.clusterConfig.gceClusterConfig.serviceAccountSelector
"Selector for a ServiceAccount in cloudplatform to populate serviceAccount."
fn spec.forProvider.clusterConfig.gceClusterConfig.serviceAccountSelector.withMatchControllerRef
withMatchControllerRef(matchControllerRef)
"MatchControllerRef ensures an object with the same controller reference as the selecting object is selected."
fn spec.forProvider.clusterConfig.gceClusterConfig.serviceAccountSelector.withMatchLabels
withMatchLabels(matchLabels)
"MatchLabels ensures an object with matching labels is selected."
fn spec.forProvider.clusterConfig.gceClusterConfig.serviceAccountSelector.withMatchLabelsMixin
withMatchLabelsMixin(matchLabels)
"MatchLabels ensures an object with matching labels is selected."
Note: This function appends passed data to existing values
obj spec.forProvider.clusterConfig.gceClusterConfig.serviceAccountSelector.policy
"Policies for selection."
fn spec.forProvider.clusterConfig.gceClusterConfig.serviceAccountSelector.policy.withResolution
withResolution(resolution)
"Resolution specifies whether resolution of this reference is required. The default is 'Required', which means the reconcile will fail if the reference cannot be resolved. 'Optional' means this reference will be a no-op if it cannot be resolved."
fn spec.forProvider.clusterConfig.gceClusterConfig.serviceAccountSelector.policy.withResolve
withResolve(resolve)
"Resolve specifies when this reference should be resolved. The default is 'IfNotPresent', which will attempt to resolve the reference only when the corresponding field is not present. Use 'Always' to resolve the reference on every reconcile."
obj spec.forProvider.clusterConfig.gceClusterConfig.shieldedInstanceConfig
"Shielded Instance Config for clusters using Compute Engine Shielded VMs."
fn spec.forProvider.clusterConfig.gceClusterConfig.shieldedInstanceConfig.withEnableIntegrityMonitoring
withEnableIntegrityMonitoring(enableIntegrityMonitoring)
"Defines whether instances have integrity monitoring enabled."
fn spec.forProvider.clusterConfig.gceClusterConfig.shieldedInstanceConfig.withEnableSecureBoot
withEnableSecureBoot(enableSecureBoot)
"Defines whether instances have Secure Boot enabled."
fn spec.forProvider.clusterConfig.gceClusterConfig.shieldedInstanceConfig.withEnableVtpm
withEnableVtpm(enableVtpm)
"Defines whether instances have the vTPM enabled."
obj spec.forProvider.clusterConfig.initializationAction
"Commands to execute on each node after config is completed. You can specify multiple versions of these. Structure defined below."
fn spec.forProvider.clusterConfig.initializationAction.withScript
withScript(script)
"The script to be executed during initialization of the cluster. The script must be a GCS file with a gs:// prefix."
fn spec.forProvider.clusterConfig.initializationAction.withTimeoutSec
withTimeoutSec(timeoutSec)
"The maximum duration (in seconds) which script is allowed to take to execute its action. GCP will default to a predetermined computed value if not set (currently 300)."
obj spec.forProvider.clusterConfig.lifecycleConfig
"The settings for auto deletion cluster schedule. Structure defined below."
fn spec.forProvider.clusterConfig.lifecycleConfig.withAutoDeleteTime
withAutoDeleteTime(autoDeleteTime)
"The time when cluster will be auto-deleted. A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. Example: \"2014-10-02T15:01:23.045123456Z\"."
fn spec.forProvider.clusterConfig.lifecycleConfig.withIdleDeleteTtl
withIdleDeleteTtl(idleDeleteTtl)
"The duration to keep the cluster alive while idling (no jobs running). After this TTL, the cluster will be deleted. Valid range: [10m, 14d]."
obj spec.forProvider.clusterConfig.masterConfig
"The Google Compute Engine config settings for the master instances in a cluster. Structure defined below."
fn spec.forProvider.clusterConfig.masterConfig.withAccelerators
withAccelerators(accelerators)
"The Compute Engine accelerator (GPU) configuration for these instances. Can be specified multiple times."
fn spec.forProvider.clusterConfig.masterConfig.withAcceleratorsMixin
withAcceleratorsMixin(accelerators)
"The Compute Engine accelerator (GPU) configuration for these instances. Can be specified multiple times."
Note: This function appends passed data to existing values
fn spec.forProvider.clusterConfig.masterConfig.withDiskConfig
withDiskConfig(diskConfig)
"Disk Config"
fn spec.forProvider.clusterConfig.masterConfig.withDiskConfigMixin
withDiskConfigMixin(diskConfig)
"Disk Config"
Note: This function appends passed data to existing values
fn spec.forProvider.clusterConfig.masterConfig.withImageUri
withImageUri(imageUri)
"The URI for the image to use for this worker. See the guide for more information."
fn spec.forProvider.clusterConfig.masterConfig.withMachineType
withMachineType(machineType)
"The name of a Google Compute Engine machine type to create for the master. If not specified, GCP will default to a predetermined computed value (currently n1-standard-4)."
fn spec.forProvider.clusterConfig.masterConfig.withMinCpuPlatform
withMinCpuPlatform(minCpuPlatform)
"The name of a minimum generation of CPU family for the master. If not specified, GCP will default to a predetermined computed value for each zone. See the guide for details about which CPU families are available (and defaulted) for each zone."
fn spec.forProvider.clusterConfig.masterConfig.withNumInstances
withNumInstances(numInstances)
"Specifies the number of master nodes to create. If not specified, GCP will default to a predetermined computed value (currently 1)."
obj spec.forProvider.clusterConfig.masterConfig.accelerators
"The Compute Engine accelerator (GPU) configuration for these instances. Can be specified multiple times."
fn spec.forProvider.clusterConfig.masterConfig.accelerators.withAcceleratorCount
withAcceleratorCount(acceleratorCount)
"The number of the accelerator cards of this type exposed to this instance. Often restricted to one of 1, 2, 4, or 8."
fn spec.forProvider.clusterConfig.masterConfig.accelerators.withAcceleratorType
withAcceleratorType(acceleratorType)
"The short name of the accelerator type to expose to this instance. For example, nvidia-tesla-k80."
obj spec.forProvider.clusterConfig.masterConfig.diskConfig
"Disk Config"
fn spec.forProvider.clusterConfig.masterConfig.diskConfig.withBootDiskSizeGb
withBootDiskSizeGb(bootDiskSizeGb)
"Size of the primary disk attached to each node, specified in GB. The primary disk contains the boot volume and system libraries, and the smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories."
fn spec.forProvider.clusterConfig.masterConfig.diskConfig.withBootDiskType
withBootDiskType(bootDiskType)
"The disk type of the primary disk attached to each node. One of \"pd-ssd\" or \"pd-standard\". Defaults to \"pd-standard\"."
fn spec.forProvider.clusterConfig.masterConfig.diskConfig.withNumLocalSsds
withNumLocalSsds(numLocalSsds)
"The amount of local SSD disks that will be attached to each master cluster node. Defaults to 0."
obj spec.forProvider.clusterConfig.metastoreConfig
"The config setting for metastore service with the cluster. Structure defined below."
fn spec.forProvider.clusterConfig.metastoreConfig.withDataprocMetastoreService
withDataprocMetastoreService(dataprocMetastoreService)
"Resource name of an existing Dataproc Metastore service."
obj spec.forProvider.clusterConfig.preemptibleWorkerConfig
"The Google Compute Engine config settings for the additional instances in a cluster. Structure defined below."
fn spec.forProvider.clusterConfig.preemptibleWorkerConfig.withDiskConfig
withDiskConfig(diskConfig)
"Disk Config"
fn spec.forProvider.clusterConfig.preemptibleWorkerConfig.withDiskConfigMixin
withDiskConfigMixin(diskConfig)
"Disk Config"
Note: This function appends passed data to existing values
fn spec.forProvider.clusterConfig.preemptibleWorkerConfig.withNumInstances
withNumInstances(numInstances)
"Specifies the number of preemptible nodes to create. Defaults to 0."
fn spec.forProvider.clusterConfig.preemptibleWorkerConfig.withPreemptibility
withPreemptibility(preemptibility)
"Specifies the preemptibility of the secondary workers. The default value is PREEMPTIBLE Accepted values are:"
obj spec.forProvider.clusterConfig.preemptibleWorkerConfig.diskConfig
"Disk Config"
fn spec.forProvider.clusterConfig.preemptibleWorkerConfig.diskConfig.withBootDiskSizeGb
withBootDiskSizeGb(bootDiskSizeGb)
"Size of the primary disk attached to each node, specified in GB. The primary disk contains the boot volume and system libraries, and the smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories."
fn spec.forProvider.clusterConfig.preemptibleWorkerConfig.diskConfig.withBootDiskType
withBootDiskType(bootDiskType)
"The disk type of the primary disk attached to each node. One of \"pd-ssd\" or \"pd-standard\". Defaults to \"pd-standard\"."
fn spec.forProvider.clusterConfig.preemptibleWorkerConfig.diskConfig.withNumLocalSsds
withNumLocalSsds(numLocalSsds)
"The amount of local SSD disks that will be attached to each master cluster node. Defaults to 0."
obj spec.forProvider.clusterConfig.securityConfig
"Security related configuration. Structure defined below."
fn spec.forProvider.clusterConfig.securityConfig.withKerberosConfig
withKerberosConfig(kerberosConfig)
"Kerberos Configuration"
fn spec.forProvider.clusterConfig.securityConfig.withKerberosConfigMixin
withKerberosConfigMixin(kerberosConfig)
"Kerberos Configuration"
Note: This function appends passed data to existing values
obj spec.forProvider.clusterConfig.securityConfig.kerberosConfig
"Kerberos Configuration"
fn spec.forProvider.clusterConfig.securityConfig.kerberosConfig.withCrossRealmTrustAdminServer
withCrossRealmTrustAdminServer(crossRealmTrustAdminServer)
"The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship."
fn spec.forProvider.clusterConfig.securityConfig.kerberosConfig.withCrossRealmTrustKdc
withCrossRealmTrustKdc(crossRealmTrustKdc)
"The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship."
fn spec.forProvider.clusterConfig.securityConfig.kerberosConfig.withCrossRealmTrustRealm
withCrossRealmTrustRealm(crossRealmTrustRealm)
"The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust."
fn spec.forProvider.clusterConfig.securityConfig.kerberosConfig.withCrossRealmTrustSharedPasswordUri
withCrossRealmTrustSharedPasswordUri(crossRealmTrustSharedPasswordUri)
"The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship."
fn spec.forProvider.clusterConfig.securityConfig.kerberosConfig.withEnableKerberos
withEnableKerberos(enableKerberos)
"Flag to indicate whether to Kerberize the cluster."
fn spec.forProvider.clusterConfig.securityConfig.kerberosConfig.withKdcDbKeyUri
withKdcDbKeyUri(kdcDbKeyUri)
"The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database."
fn spec.forProvider.clusterConfig.securityConfig.kerberosConfig.withKeyPasswordUri
withKeyPasswordUri(keyPasswordUri)
"The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc."
fn spec.forProvider.clusterConfig.securityConfig.kerberosConfig.withKeystorePasswordUri
withKeystorePasswordUri(keystorePasswordUri)
"The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificated, the password is generated by Dataproc."
fn spec.forProvider.clusterConfig.securityConfig.kerberosConfig.withKeystoreUri
withKeystoreUri(keystoreUri)
"The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate."
fn spec.forProvider.clusterConfig.securityConfig.kerberosConfig.withKmsKeyUri
withKmsKeyUri(kmsKeyUri)
"The URI of the KMS key used to encrypt various sensitive files."
fn spec.forProvider.clusterConfig.securityConfig.kerberosConfig.withRealm
withRealm(realm)
"The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm."
fn spec.forProvider.clusterConfig.securityConfig.kerberosConfig.withRootPrincipalPasswordUri
withRootPrincipalPasswordUri(rootPrincipalPasswordUri)
"The Cloud Storage URI of a KMS encrypted file containing the root principal password."
fn spec.forProvider.clusterConfig.securityConfig.kerberosConfig.withTgtLifetimeHours
withTgtLifetimeHours(tgtLifetimeHours)
"The lifetime of the ticket granting ticket, in hours."
fn spec.forProvider.clusterConfig.securityConfig.kerberosConfig.withTruststorePasswordUri
withTruststorePasswordUri(truststorePasswordUri)
"The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc."
fn spec.forProvider.clusterConfig.securityConfig.kerberosConfig.withTruststoreUri
withTruststoreUri(truststoreUri)
"The Cloud Storage URI of the truststore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate."
obj spec.forProvider.clusterConfig.softwareConfig
"The config settings for software inside the cluster. Structure defined below."
fn spec.forProvider.clusterConfig.softwareConfig.withImageVersion
withImageVersion(imageVersion)
"The Cloud Dataproc image version to use for the cluster - this controls the sets of software versions installed onto the nodes when you create clusters. If not specified, defaults to the latest version. For a list of valid versions see Cloud Dataproc versions"
fn spec.forProvider.clusterConfig.softwareConfig.withOptionalComponents
withOptionalComponents(optionalComponents)
"The set of optional components to activate on the cluster. See Available Optional Components."
fn spec.forProvider.clusterConfig.softwareConfig.withOptionalComponentsMixin
withOptionalComponentsMixin(optionalComponents)
"The set of optional components to activate on the cluster. See Available Optional Components."
Note: This function appends passed data to existing values
fn spec.forProvider.clusterConfig.softwareConfig.withOverrideProperties
withOverrideProperties(overrideProperties)
"A list of override and additional properties (key/value pairs) used to modify various aspects of the common configuration files used when creating a cluster. For a list of valid properties please see Cluster properties"
fn spec.forProvider.clusterConfig.softwareConfig.withOverridePropertiesMixin
withOverridePropertiesMixin(overrideProperties)
"A list of override and additional properties (key/value pairs) used to modify various aspects of the common configuration files used when creating a cluster. For a list of valid properties please see Cluster properties"
Note: This function appends passed data to existing values
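A short sketch of pinning software versions and overriding cluster properties; the image version, component names, and property values are placeholders, and the import path is an assumption:

```jsonnet
local cluster = (import 'provider-gcp/main.libsonnet').dataproc.v1beta1.cluster;  // assumed path
local sw = cluster.spec.forProvider.clusterConfig.softwareConfig;

cluster.new('example')
+ sw.withImageVersion('2.1-debian11')                 // pin an image version (placeholder)
+ sw.withOptionalComponents(['JUPYTER', 'ZEPPELIN'])  // activate optional components
+ sw.withOverrideProperties({
    'spark:spark.executor.memory': '4g',              // override a common config property
  })
```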
obj spec.forProvider.clusterConfig.workerConfig
"The Google Compute Engine config settings for the worker instances in a cluster. Structure defined below."
fn spec.forProvider.clusterConfig.workerConfig.withAccelerators
withAccelerators(accelerators)
"The Compute Engine accelerator configuration for these instances. Can be specified multiple times."
fn spec.forProvider.clusterConfig.workerConfig.withAcceleratorsMixin
withAcceleratorsMixin(accelerators)
"The Compute Engine accelerator configuration for these instances. Can be specified multiple times."
Note: This function appends passed data to existing values
fn spec.forProvider.clusterConfig.workerConfig.withDiskConfig
withDiskConfig(diskConfig)
"Disk Config"
fn spec.forProvider.clusterConfig.workerConfig.withDiskConfigMixin
withDiskConfigMixin(diskConfig)
"Disk Config"
Note: This function appends passed data to existing values
fn spec.forProvider.clusterConfig.workerConfig.withImageUri
withImageUri(imageUri)
"The URI for the image to use for this worker. See the guide for more information."
fn spec.forProvider.clusterConfig.workerConfig.withMachineType
withMachineType(machineType)
"The name of a Google Compute Engine machine type to create for the worker nodes. If not specified, GCP will default to a predetermined computed value (currently n1-standard-4)."
fn spec.forProvider.clusterConfig.workerConfig.withMinCpuPlatform
withMinCpuPlatform(minCpuPlatform)
"The name of a minimum generation of CPU family for the master. If not specified, GCP will default to a predetermined computed value for each zone. See the guide for details about which CPU families are available (and defaulted) for each zone."
fn spec.forProvider.clusterConfig.workerConfig.withNumInstances
withNumInstances(numInstances)
"Specifies the number of worker nodes to create. If not specified, GCP will default to a predetermined computed value (currently 2). There is currently a beta feature which allows you to run a Single Node Cluster. In order to take advantage of this you need to set \"dataproc:dataproc.allow.zero.workers\" = \"true\" in cluster_config.software_config.properties"
obj spec.forProvider.clusterConfig.workerConfig.accelerators
"The Compute Engine accelerator configuration for these instances. Can be specified multiple times."
fn spec.forProvider.clusterConfig.workerConfig.accelerators.withAcceleratorCount
withAcceleratorCount(acceleratorCount)
"The number of the accelerator cards of this type exposed to this instance. Often restricted to one of 1, 2, 4, or 8."
fn spec.forProvider.clusterConfig.workerConfig.accelerators.withAcceleratorType
withAcceleratorType(acceleratorType)
"The short name of the accelerator type to expose to this instance. For example, nvidia-tesla-k80."
obj spec.forProvider.clusterConfig.workerConfig.diskConfig
"Disk Config"
fn spec.forProvider.clusterConfig.workerConfig.diskConfig.withBootDiskSizeGb
withBootDiskSizeGb(bootDiskSizeGb)
"Size of the primary disk attached to each node, specified in GB. The primary disk contains the boot volume and system libraries, and the smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories."
fn spec.forProvider.clusterConfig.workerConfig.diskConfig.withBootDiskType
withBootDiskType(bootDiskType)
"The disk type of the primary disk attached to each node. One of \"pd-ssd\" or \"pd-standard\". Defaults to \"pd-standard\"."
fn spec.forProvider.clusterConfig.workerConfig.diskConfig.withNumLocalSsds
withNumLocalSsds(numLocalSsds)
"The amount of local SSD disks that will be attached to each master cluster node. Defaults to 0."
obj spec.forProvider.virtualClusterConfig
"Allows you to configure a virtual Dataproc on GKE cluster. Structure defined below."
fn spec.forProvider.virtualClusterConfig.withAuxiliaryServicesConfig
withAuxiliaryServicesConfig(auxiliaryServicesConfig)
"Configuration of auxiliary services used by this cluster. Structure defined below."
fn spec.forProvider.virtualClusterConfig.withAuxiliaryServicesConfigMixin
withAuxiliaryServicesConfigMixin(auxiliaryServicesConfig)
"Configuration of auxiliary services used by this cluster. Structure defined below."
Note: This function appends passed data to existing values
fn spec.forProvider.virtualClusterConfig.withKubernetesClusterConfig
withKubernetesClusterConfig(kubernetesClusterConfig)
"The configuration for running the Dataproc cluster on Kubernetes. Structure defined below."
fn spec.forProvider.virtualClusterConfig.withKubernetesClusterConfigMixin
withKubernetesClusterConfigMixin(kubernetesClusterConfig)
"The configuration for running the Dataproc cluster on Kubernetes. Structure defined below."
Note: This function appends passed data to existing values
fn spec.forProvider.virtualClusterConfig.withStagingBucket
withStagingBucket(stagingBucket)
"The Cloud Storage staging bucket used to stage files, such as Hadoop jars, between client machines and the cluster. Note: If you don't explicitly specify a staging_bucket then GCP will auto create / assign one for you. However, you are not guaranteed an auto generated bucket which is solely dedicated to your cluster; it may be shared with other clusters in the same region/zone also choosing to use the auto generation option."
obj spec.forProvider.virtualClusterConfig.auxiliaryServicesConfig
"Configuration of auxiliary services used by this cluster. Structure defined below."
fn spec.forProvider.virtualClusterConfig.auxiliaryServicesConfig.withMetastoreConfig
withMetastoreConfig(metastoreConfig)
"The config setting for metastore service with the cluster. Structure defined below."
fn spec.forProvider.virtualClusterConfig.auxiliaryServicesConfig.withMetastoreConfigMixin
withMetastoreConfigMixin(metastoreConfig)
"The config setting for metastore service with the cluster. Structure defined below."
Note: This function appends passed data to existing values
fn spec.forProvider.virtualClusterConfig.auxiliaryServicesConfig.withSparkHistoryServerConfig
withSparkHistoryServerConfig(sparkHistoryServerConfig)
"The Spark History Server configuration for the workload."
fn spec.forProvider.virtualClusterConfig.auxiliaryServicesConfig.withSparkHistoryServerConfigMixin
withSparkHistoryServerConfigMixin(sparkHistoryServerConfig)
"The Spark History Server configuration for the workload."
Note: This function appends passed data to existing values
obj spec.forProvider.virtualClusterConfig.auxiliaryServicesConfig.metastoreConfig
"The config setting for metastore service with the cluster. Structure defined below."
fn spec.forProvider.virtualClusterConfig.auxiliaryServicesConfig.metastoreConfig.withDataprocMetastoreService
withDataprocMetastoreService(dataprocMetastoreService)
"Resource name of an existing Dataproc Metastore service."
obj spec.forProvider.virtualClusterConfig.auxiliaryServicesConfig.sparkHistoryServerConfig
"The Spark History Server configuration for the workload."
fn spec.forProvider.virtualClusterConfig.auxiliaryServicesConfig.sparkHistoryServerConfig.withDataprocCluster
withDataprocCluster(dataprocCluster)
"Resource name of an existing Dataproc Cluster to act as a Spark History Server for the workload."
obj spec.forProvider.virtualClusterConfig.kubernetesClusterConfig
"The configuration for running the Dataproc cluster on Kubernetes. Structure defined below."
fn spec.forProvider.virtualClusterConfig.kubernetesClusterConfig.withGkeClusterConfig
withGkeClusterConfig(gkeClusterConfig)
"The configuration for running the Dataproc cluster on GKE."
fn spec.forProvider.virtualClusterConfig.kubernetesClusterConfig.withGkeClusterConfigMixin
withGkeClusterConfigMixin(gkeClusterConfig)
"The configuration for running the Dataproc cluster on GKE."
Note: This function appends passed data to existing values
fn spec.forProvider.virtualClusterConfig.kubernetesClusterConfig.withKubernetesNamespace
withKubernetesNamespace(kubernetesNamespace)
"A namespace within the Kubernetes cluster to deploy into. If this namespace does not exist, it is created. If it exists, Dataproc verifies that another Dataproc VirtualCluster is not installed into it. If not specified, the name of the Dataproc Cluster is used."
fn spec.forProvider.virtualClusterConfig.kubernetesClusterConfig.withKubernetesSoftwareConfig
withKubernetesSoftwareConfig(kubernetesSoftwareConfig)
"The software configuration for this Dataproc cluster running on Kubernetes."
fn spec.forProvider.virtualClusterConfig.kubernetesClusterConfig.withKubernetesSoftwareConfigMixin
withKubernetesSoftwareConfigMixin(kubernetesSoftwareConfig)
"The software configuration for this Dataproc cluster running on Kubernetes."
Note: This function appends passed data to existing values
obj spec.forProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig
"The configuration for running the Dataproc cluster on GKE."
fn spec.forProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.withGkeClusterTarget
withGkeClusterTarget(gkeClusterTarget)
"A target GKE cluster to deploy to. It must be in the same project and region as the Dataproc cluster (the GKE cluster can be zonal or regional)"
fn spec.forProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.withNodePoolTarget
withNodePoolTarget(nodePoolTarget)
"GKE node pools where workloads will be scheduled. At least one node pool must be assigned the DEFAULT GkeNodePoolTarget.Role. If a GkeNodePoolTarget is not specified, Dataproc constructs a DEFAULT GkeNodePoolTarget. Each role can be given to only one GkeNodePoolTarget. All node pools must have the same location settings."
fn spec.forProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.withNodePoolTargetMixin
withNodePoolTargetMixin(nodePoolTarget)
"GKE node pools where workloads will be scheduled. At least one node pool must be assigned the DEFAULT GkeNodePoolTarget.Role. If a GkeNodePoolTarget is not specified, Dataproc constructs a DEFAULT GkeNodePoolTarget. Each role can be given to only one GkeNodePoolTarget. All node pools must have the same location settings."
Note: This function appends passed data to existing values
obj spec.forProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.nodePoolTarget
"GKE node pools where workloads will be scheduled. At least one node pool must be assigned the DEFAULT GkeNodePoolTarget.Role. If a GkeNodePoolTarget is not specified, Dataproc constructs a DEFAULT GkeNodePoolTarget. Each role can be given to only one GkeNodePoolTarget. All node pools must have the same location settings."
fn spec.forProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.nodePoolTarget.withNodePool
withNodePool(nodePool)
"The target GKE node pool."
fn spec.forProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.nodePoolTarget.withNodePoolConfig
withNodePoolConfig(nodePoolConfig)
"(Input only) The configuration for the GKE node pool. If specified, Dataproc attempts to create a node pool with the specified shape. If one with the same name already exists, it is verified against all specified fields. If a field differs, the virtual cluster creation will fail."
fn spec.forProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.nodePoolTarget.withNodePoolConfigMixin
withNodePoolConfigMixin(nodePoolConfig)
"(Input only) The configuration for the GKE node pool. If specified, Dataproc attempts to create a node pool with the specified shape. If one with the same name already exists, it is verified against all specified fields. If a field differs, the virtual cluster creation will fail."
Note: This function appends passed data to existing values
fn spec.forProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.nodePoolTarget.withRoles
withRoles(roles)
"The roles associated with the GKE node pool. One of \"DEFAULT\", \"CONTROLLER\", \"SPARK_DRIVER\" or \"SPARK_EXECUTOR\"."
fn spec.forProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.nodePoolTarget.withRolesMixin
withRolesMixin(roles)
"The roles associated with the GKE node pool. One of \"DEFAULT\", \"CONTROLLER\", \"SPARK_DRIVER\" or \"SPARK_EXECUTOR\"."
Note: This function appends passed data to existing values
obj spec.forProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.nodePoolTarget.nodePoolConfig
"(Input only) The configuration for the GKE node pool. If specified, Dataproc attempts to create a node pool with the specified shape. If one with the same name already exists, it is verified against all specified fields. If a field differs, the virtual cluster creation will fail."
fn spec.forProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.nodePoolTarget.nodePoolConfig.withAutoscaling
withAutoscaling(autoscaling)
"The autoscaler configuration for this node pool. The autoscaler is enabled only when a valid configuration is present."
fn spec.forProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.nodePoolTarget.nodePoolConfig.withAutoscalingMixin
withAutoscalingMixin(autoscaling)
"The autoscaler configuration for this node pool. The autoscaler is enabled only when a valid configuration is present."
Note: This function appends passed data to existing values
fn spec.forProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.nodePoolTarget.nodePoolConfig.withConfig
withConfig(config)
"The node pool configuration."
fn spec.forProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.nodePoolTarget.nodePoolConfig.withConfigMixin
withConfigMixin(config)
"The node pool configuration."
Note: This function appends passed data to existing values
fn spec.forProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.nodePoolTarget.nodePoolConfig.withLocations
withLocations(locations)
"The list of Compute Engine zones where node pool nodes associated with a Dataproc on GKE virtual cluster will be located."
fn spec.forProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.nodePoolTarget.nodePoolConfig.withLocationsMixin
withLocationsMixin(locations)
"The list of Compute Engine zones where node pool nodes associated with a Dataproc on GKE virtual cluster will be located."
Note: This function appends passed data to existing values
obj spec.forProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.nodePoolTarget.nodePoolConfig.autoscaling
"The autoscaler configuration for this node pool. The autoscaler is enabled only when a valid configuration is present."
fn spec.forProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.nodePoolTarget.nodePoolConfig.autoscaling.withMaxNodeCount
withMaxNodeCount(maxNodeCount)
"The maximum number of nodes in the node pool. Must be >= minNodeCount, and must be > 0."
fn spec.forProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.nodePoolTarget.nodePoolConfig.autoscaling.withMinNodeCount
withMinNodeCount(minNodeCount)
"The minimum number of nodes in the node pool. Must be >= 0 and <= maxNodeCount."
obj spec.forProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.nodePoolTarget.nodePoolConfig.config
"The node pool configuration."
fn spec.forProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.nodePoolTarget.nodePoolConfig.config.withLocalSsdCount
withLocalSsdCount(localSsdCount)
"The number of local SSD disks to attach to the node, which is limited by the maximum number of disks allowable per zone."
fn spec.forProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.nodePoolTarget.nodePoolConfig.config.withMachineType
withMachineType(machineType)
"The name of a Compute Engine machine type."
fn spec.forProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.nodePoolTarget.nodePoolConfig.config.withMinCpuPlatform
withMinCpuPlatform(minCpuPlatform)
"Minimum CPU platform to be used by this instance. The instance may be scheduled on the specified or a newer CPU platform. Specify the friendly names of CPU platforms, such as \"Intel Haswell\" or \"Intel Sandy Bridge\"."
fn spec.forProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.nodePoolTarget.nodePoolConfig.config.withPreemptible
withPreemptible(preemptible)
"Whether the nodes are created as preemptible VM instances. Preemptible nodes cannot be used in a node pool with the CONTROLLER role or in the DEFAULT node pool if the CONTROLLER role is not assigned (the DEFAULT node pool will assume the CONTROLLER role)."
fn spec.forProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.nodePoolTarget.nodePoolConfig.config.withSpot
withSpot(spot)
"Spot flag for enabling Spot VM, which is a rebrand of the existing preemptible flag."
obj spec.forProvider.virtualClusterConfig.kubernetesClusterConfig.kubernetesSoftwareConfig
"The software configuration for this Dataproc cluster running on Kubernetes."
fn spec.forProvider.virtualClusterConfig.kubernetesClusterConfig.kubernetesSoftwareConfig.withComponentVersion
withComponentVersion(componentVersion)
"The components that should be installed in this Dataproc cluster. The key must be a string from the KubernetesComponent enumeration. The value is the version of the software to be installed. At least one entry must be specified."
fn spec.forProvider.virtualClusterConfig.kubernetesClusterConfig.kubernetesSoftwareConfig.withComponentVersionMixin
withComponentVersionMixin(componentVersion)
"The components that should be installed in this Dataproc cluster. The key must be a string from the KubernetesComponent enumeration. The value is the version of the software to be installed. At least one entry must be specified."
Note: This function appends passed data to existing values
fn spec.forProvider.virtualClusterConfig.kubernetesClusterConfig.kubernetesSoftwareConfig.withProperties
withProperties(properties)
"The properties to set on daemon config files. Property keys are specified in prefix:property format, for example spark:spark.kubernetes.container.image."
fn spec.forProvider.virtualClusterConfig.kubernetesClusterConfig.kubernetesSoftwareConfig.withPropertiesMixin
withPropertiesMixin(properties)
"The properties to set on daemon config files. Property keys are specified in prefix:property format, for example spark:spark.kubernetes.container.image."
Note: This function appends passed data to existing values
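Putting the virtualClusterConfig pieces together, a hedged sketch of a Dataproc cluster backed by an existing GKE cluster. The import path is an assumption, and the project, cluster, node pool, and component version strings are placeholders:

```jsonnet
local cluster = (import 'provider-gcp/main.libsonnet').dataproc.v1beta1.cluster;  // assumed path
local vcc = cluster.spec.forProvider.virtualClusterConfig;
local gke = vcc.kubernetesClusterConfig.gkeClusterConfig;

cluster.new('gke-backed')
+ cluster.spec.forProvider.withRegion('us-central1')
+ vcc.kubernetesClusterConfig.withKubernetesNamespace('dataproc')
+ gke.withGkeClusterTarget('projects/my-project/locations/us-central1/clusters/my-gke')
+ gke.withNodePoolTarget([
    // At least one node pool must carry the DEFAULT role.
    gke.nodePoolTarget.withNodePool('projects/my-project/locations/us-central1/clusters/my-gke/nodePools/default-pool')
    + gke.nodePoolTarget.withRoles(['DEFAULT']),
  ])
+ vcc.kubernetesClusterConfig.kubernetesSoftwareConfig.withComponentVersion({ SPARK: '3.1-dataproc-7' })
```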
obj spec.initProvider
"THIS IS AN ALPHA FIELD. Do not use it in production. It is not honored unless the relevant Crossplane feature flag is enabled, and may be changed or removed without notice. InitProvider holds the same fields as ForProvider, with the exception of Identifier and other resource reference fields. The fields that are in InitProvider are merged into ForProvider when the resource is created. The same fields are also added to the terraform ignore_changes hook, to avoid updating them after creation. This is useful for fields that are required on creation, but we do not desire to update them after creation, for example because of an external controller is managing them, like an autoscaler."
fn spec.initProvider.withClusterConfig
withClusterConfig(clusterConfig)
"Allows you to configure various aspects of the cluster. Structure defined below."
fn spec.initProvider.withClusterConfigMixin
withClusterConfigMixin(clusterConfig)
"Allows you to configure various aspects of the cluster. Structure defined below."
Note: This function appends passed data to existing values
fn spec.initProvider.withGracefulDecommissionTimeout
withGracefulDecommissionTimeout(gracefulDecommissionTimeout)
"Does not affect auto scaling decomissioning from an autoscaling policy. Graceful decommissioning allows removing nodes from the cluster without interrupting jobs in progress. Timeout specifies how long to wait for jobs in progress to finish before forcefully removing nodes (and potentially interrupting jobs). Default timeout is 0 (for forceful decommission), and the maximum allowed timeout is 1 day. (see JSON representation of Duration). Only supported on Dataproc image versions 1.2 and higher. For more context see the docs"
fn spec.initProvider.withLabels
withLabels(labels)
"The list of labels (key/value pairs) to be applied to instances in the cluster. GCP generates some itself including goog-dataproc-cluster-name which is the name of the cluster."
fn spec.initProvider.withLabelsMixin
withLabelsMixin(labels)
"The list of labels (key/value pairs) to be applied to instances in the cluster. GCP generates some itself including goog-dataproc-cluster-name which is the name of the cluster."
Note: This function appends passed data to existing values
fn spec.initProvider.withName
withName(name)
"The name of the cluster, unique within the project and zone."
fn spec.initProvider.withProject
withProject(project)
"The ID of the project in which the cluster will exist. If it is not provided, the provider project is used."
fn spec.initProvider.withRegion
withRegion(region)
"The region in which the cluster and associated nodes will be created in. Defaults to global."
fn spec.initProvider.withVirtualClusterConfig
withVirtualClusterConfig(virtualClusterConfig)
"Allows you to configure a virtual Dataproc on GKE cluster. Structure defined below."
fn spec.initProvider.withVirtualClusterConfigMixin
withVirtualClusterConfigMixin(virtualClusterConfig)
"Allows you to configure a virtual Dataproc on GKE cluster. Structure defined below."
Note: This function appends passed data to existing values
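Given the alpha caveat above, a hedged sketch of the intended usage pattern: fields set under initProvider apply only at creation and are ignored afterwards, so they pair naturally with values an external controller manages. The import path is an assumption:

```jsonnet
local cluster = (import 'provider-gcp/main.libsonnet').dataproc.v1beta1.cluster;  // assumed path

cluster.new('example')
+ cluster.spec.forProvider.withRegion('us-central1')
// Seed two workers at creation; if an autoscaler later changes the count,
// Crossplane will not revert it, because initProvider fields are ignored after creation.
+ cluster.spec.initProvider.clusterConfig.workerConfig.withNumInstances(2)
```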
obj spec.initProvider.clusterConfig
"Allows you to configure various aspects of the cluster. Structure defined below."
fn spec.initProvider.clusterConfig.withAutoscalingConfig
withAutoscalingConfig(autoscalingConfig)
"The autoscaling policy config associated with the cluster. Note that once set, if autoscaling_config is the only field set in cluster_config, it can only be removed by setting policy_uri = \"\", rather than removing the whole block. Structure defined below."
fn spec.initProvider.clusterConfig.withAutoscalingConfigMixin
withAutoscalingConfigMixin(autoscalingConfig)
"The autoscaling policy config associated with the cluster. Note that once set, if autoscaling_config is the only field set in cluster_config, it can only be removed by setting policy_uri = \"\", rather than removing the whole block. Structure defined below."
Note: This function appends passed data to existing values
fn spec.initProvider.clusterConfig.withDataprocMetricConfig
withDataprocMetricConfig(dataprocMetricConfig)
"The Compute Engine accelerator (GPU) configuration for these instances. Can be specified multiple times. Structure defined below."
fn spec.initProvider.clusterConfig.withDataprocMetricConfigMixin
withDataprocMetricConfigMixin(dataprocMetricConfig)
"The Compute Engine accelerator (GPU) configuration for these instances. Can be specified multiple times. Structure defined below."
Note: This function appends passed data to existing values
fn spec.initProvider.clusterConfig.withEncryptionConfig
withEncryptionConfig(encryptionConfig)
"The Customer managed encryption keys settings for the cluster. Structure defined below."
fn spec.initProvider.clusterConfig.withEncryptionConfigMixin
withEncryptionConfigMixin(encryptionConfig)
"The Customer managed encryption keys settings for the cluster. Structure defined below."
Note: This function appends passed data to existing values
fn spec.initProvider.clusterConfig.withEndpointConfig
withEndpointConfig(endpointConfig)
"The config settings for port access on the cluster. Structure defined below."
fn spec.initProvider.clusterConfig.withEndpointConfigMixin
withEndpointConfigMixin(endpointConfig)
"The config settings for port access on the cluster. Structure defined below."
Note: This function appends passed data to existing values
fn spec.initProvider.clusterConfig.withGceClusterConfig
withGceClusterConfig(gceClusterConfig)
"Common config settings for resources of Google Compute Engine cluster instances, applicable to all instances in the cluster. Structure defined below."
fn spec.initProvider.clusterConfig.withGceClusterConfigMixin
withGceClusterConfigMixin(gceClusterConfig)
"Common config settings for resources of Google Compute Engine cluster instances, applicable to all instances in the cluster. Structure defined below."
Note: This function appends passed data to existing values
fn spec.initProvider.clusterConfig.withInitializationAction
withInitializationAction(initializationAction)
"Commands to execute on each node after config is completed. You can specify multiple versions of these. Structure defined below."
fn spec.initProvider.clusterConfig.withInitializationActionMixin
withInitializationActionMixin(initializationAction)
"Commands to execute on each node after config is completed. You can specify multiple versions of these. Structure defined below."
Note: This function appends passed data to existing values
fn spec.initProvider.clusterConfig.withLifecycleConfig
withLifecycleConfig(lifecycleConfig)
"The settings for auto deletion cluster schedule. Structure defined below."
fn spec.initProvider.clusterConfig.withLifecycleConfigMixin
withLifecycleConfigMixin(lifecycleConfig)
"The settings for auto deletion cluster schedule. Structure defined below."
Note: This function appends passed data to existing values
fn spec.initProvider.clusterConfig.withMasterConfig
withMasterConfig(masterConfig)
"The Google Compute Engine config settings for the master instances in a cluster. Structure defined below."
fn spec.initProvider.clusterConfig.withMasterConfigMixin
withMasterConfigMixin(masterConfig)
"The Google Compute Engine config settings for the master instances in a cluster. Structure defined below."
Note: This function appends passed data to existing values
fn spec.initProvider.clusterConfig.withMetastoreConfig
withMetastoreConfig(metastoreConfig)
"The config setting for metastore service with the cluster. Structure defined below."
fn spec.initProvider.clusterConfig.withMetastoreConfigMixin
withMetastoreConfigMixin(metastoreConfig)
"The config setting for metastore service with the cluster. Structure defined below."
Note: This function appends passed data to existing values
fn spec.initProvider.clusterConfig.withPreemptibleWorkerConfig
withPreemptibleWorkerConfig(preemptibleWorkerConfig)
"The Google Compute Engine config settings for the additional instances in a cluster. Structure defined below."
fn spec.initProvider.clusterConfig.withPreemptibleWorkerConfigMixin
withPreemptibleWorkerConfigMixin(preemptibleWorkerConfig)
"The Google Compute Engine config settings for the additional instances in a cluster. Structure defined below."
Note: This function appends passed data to existing values
fn spec.initProvider.clusterConfig.withSecurityConfig
withSecurityConfig(securityConfig)
"Security related configuration. Structure defined below."
fn spec.initProvider.clusterConfig.withSecurityConfigMixin
withSecurityConfigMixin(securityConfig)
"Security related configuration. Structure defined below."
Note: This function appends passed data to existing values
fn spec.initProvider.clusterConfig.withSoftwareConfig
withSoftwareConfig(softwareConfig)
"The config settings for software inside the cluster. Structure defined below."
fn spec.initProvider.clusterConfig.withSoftwareConfigMixin
withSoftwareConfigMixin(softwareConfig)
"The config settings for software inside the cluster. Structure defined below."
Note: This function appends passed data to existing values
fn spec.initProvider.clusterConfig.withStagingBucket
withStagingBucket(stagingBucket)
"The Cloud Storage staging bucket used to stage files, such as Hadoop jars, between client machines and the cluster. Note: If you don't explicitly specify a staging_bucket then GCP will auto create / assign one for you. However, you are not guaranteed an auto generated bucket which is solely dedicated to your cluster; it may be shared with other clusters in the same region/zone also choosing to use the auto generation option."
fn spec.initProvider.clusterConfig.withTempBucket
withTempBucket(tempBucket)
"The Cloud Storage temp bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. Note: If you don't explicitly specify a temp_bucket then GCP will auto create / assign one for you."
fn spec.initProvider.clusterConfig.withWorkerConfig
withWorkerConfig(workerConfig)
"The Google Compute Engine config settings for the worker instances in a cluster. Structure defined below."
fn spec.initProvider.clusterConfig.withWorkerConfigMixin
withWorkerConfigMixin(workerConfig)
"The Google Compute Engine config settings for the worker instances in a cluster. Structure defined below."
Note: This function appends passed data to existing values
obj spec.initProvider.clusterConfig.autoscalingConfig
"The autoscaling policy config associated with the cluster. Note that once set, if autoscaling_config is the only field set in cluster_config, it can only be removed by setting policy_uri = \"\", rather than removing the whole block. Structure defined below."
fn spec.initProvider.clusterConfig.autoscalingConfig.withPolicyUri
withPolicyUri(policyUri)
"The autoscaling policy used by the cluster."
obj spec.initProvider.clusterConfig.dataprocMetricConfig
"The Compute Engine accelerator (GPU) configuration for these instances. Can be specified multiple times. Structure defined below."
fn spec.initProvider.clusterConfig.dataprocMetricConfig.withMetrics
withMetrics(metrics)
"Metrics sources to enable."
fn spec.initProvider.clusterConfig.dataprocMetricConfig.withMetricsMixin
withMetricsMixin(metrics)
"Metrics sources to enable."
Note: This function appends passed data to existing values
obj spec.initProvider.clusterConfig.dataprocMetricConfig.metrics
"Metrics sources to enable."
fn spec.initProvider.clusterConfig.dataprocMetricConfig.metrics.withMetricOverrides
withMetricOverrides(metricOverrides)
"One or more [available OSS metrics] (https://cloud.google.com/dataproc/docs/guides/monitoring#available_oss_metrics) to collect for the metric course."
fn spec.initProvider.clusterConfig.dataprocMetricConfig.metrics.withMetricOverridesMixin
withMetricOverridesMixin(metricOverrides)
"One or more [available OSS metrics] (https://cloud.google.com/dataproc/docs/guides/monitoring#available_oss_metrics) to collect for the metric course."
Note: This function appends passed data to existing values
fn spec.initProvider.clusterConfig.dataprocMetricConfig.metrics.withMetricSource
withMetricSource(metricSource)
"A source for the collection of Dataproc OSS metrics (see available OSS metrics)."
obj spec.initProvider.clusterConfig.encryptionConfig
"The Customer managed encryption keys settings for the cluster. Structure defined below."
fn spec.initProvider.clusterConfig.encryptionConfig.withKmsKeyName
withKmsKeyName(kmsKeyName)
"The Cloud KMS key name to use for PD disk encryption for all instances in the cluster."
obj spec.initProvider.clusterConfig.endpointConfig
"The config settings for port access on the cluster. Structure defined below."
fn spec.initProvider.clusterConfig.endpointConfig.withEnableHttpPortAccess
withEnableHttpPortAccess(enableHttpPortAccess)
"The flag to enable http access to specific ports on the cluster from external sources (aka Component Gateway). Defaults to false."
obj spec.initProvider.clusterConfig.gceClusterConfig
"Common config settings for resources of Google Compute Engine cluster instances, applicable to all instances in the cluster. Structure defined below."
fn spec.initProvider.clusterConfig.gceClusterConfig.withInternalIpOnly
withInternalIpOnly(internalIpOnly)
"By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. If set to true, all instances in the cluster will only have internal IP addresses. Note: Private Google Access (also known as privateIpGoogleAccess) must be enabled on the subnetwork that the cluster will be launched in."
fn spec.initProvider.clusterConfig.gceClusterConfig.withMetadata
withMetadata(metadata)
"A map of the Compute Engine metadata entries to add to all instances (see Project and instance metadata)."
fn spec.initProvider.clusterConfig.gceClusterConfig.withMetadataMixin
withMetadataMixin(metadata)
"A map of the Compute Engine metadata entries to add to all instances (see Project and instance metadata)."
Note: This function appends passed data to existing values
fn spec.initProvider.clusterConfig.gceClusterConfig.withNetwork
withNetwork(network)
"The name or self_link of the Google Compute Engine network to the cluster will be part of. Conflicts with subnetwork. If neither is specified, this defaults to the \"default\" network."
fn spec.initProvider.clusterConfig.gceClusterConfig.withNodeGroupAffinity
withNodeGroupAffinity(nodeGroupAffinity)
"Node Group Affinity for sole-tenant clusters."
fn spec.initProvider.clusterConfig.gceClusterConfig.withNodeGroupAffinityMixin
withNodeGroupAffinityMixin(nodeGroupAffinity)
"Node Group Affinity for sole-tenant clusters."
Note: This function appends passed data to existing values
fn spec.initProvider.clusterConfig.gceClusterConfig.withReservationAffinity
withReservationAffinity(reservationAffinity)
"Reservation Affinity for consuming zonal reservation."
fn spec.initProvider.clusterConfig.gceClusterConfig.withReservationAffinityMixin
withReservationAffinityMixin(reservationAffinity)
"Reservation Affinity for consuming zonal reservation."
Note: This function appends passed data to existing values
fn spec.initProvider.clusterConfig.gceClusterConfig.withServiceAccountScopes
withServiceAccountScopes(serviceAccountScopes)
"The set of Google API scopes to be made available on all of the node VMs under the service_account specified. Both OAuth2 URLs and gcloud short names are supported. To allow full access to all Cloud APIs, use the cloud-platform scope. See a complete list of scopes here."
fn spec.initProvider.clusterConfig.gceClusterConfig.withServiceAccountScopesMixin
withServiceAccountScopesMixin(serviceAccountScopes)
"The set of Google API scopes to be made available on all of the node VMs under the service_account specified. Both OAuth2 URLs and gcloud short names are supported. To allow full access to all Cloud APIs, use the cloud-platform scope. See a complete list of scopes here."
Note: This function appends passed data to existing values
fn spec.initProvider.clusterConfig.gceClusterConfig.withShieldedInstanceConfig
withShieldedInstanceConfig(shieldedInstanceConfig)
"Shielded Instance Config for clusters using Compute Engine Shielded VMs."
fn spec.initProvider.clusterConfig.gceClusterConfig.withShieldedInstanceConfigMixin
withShieldedInstanceConfigMixin(shieldedInstanceConfig)
"Shielded Instance Config for clusters using Compute Engine Shielded VMs."
Note: This function appends passed data to existing values
fn spec.initProvider.clusterConfig.gceClusterConfig.withSubnetwork
withSubnetwork(subnetwork)
"The name or self_link of the Google Compute Engine subnetwork the cluster will be part of. Conflicts with network."
fn spec.initProvider.clusterConfig.gceClusterConfig.withTags
withTags(tags)
"The list of instance tags applied to instances in the cluster. Tags are used to identify valid sources or targets for network firewalls."
fn spec.initProvider.clusterConfig.gceClusterConfig.withTagsMixin
withTagsMixin(tags)
"The list of instance tags applied to instances in the cluster. Tags are used to identify valid sources or targets for network firewalls."
Note: This function appends passed data to existing values
fn spec.initProvider.clusterConfig.gceClusterConfig.withZone
withZone(zone)
"The GCP zone where your data is stored and used (i.e. where the master and the worker nodes will be created in). If region is set to 'global' (default) then zone is mandatory, otherwise GCP is able to make use of Auto Zone Placement to determine this automatically for you. Note: This setting additionally determines and restricts which computing resources are available for use with other configs such as cluster_config.master_config.machine_type and cluster_config.worker_config.machine_type."
obj spec.initProvider.clusterConfig.gceClusterConfig.nodeGroupAffinity
"Node Group Affinity for sole-tenant clusters."
fn spec.initProvider.clusterConfig.gceClusterConfig.nodeGroupAffinity.withNodeGroupUri
withNodeGroupUri(nodeGroupUri)
"The URI of a sole-tenant node group resource that the cluster will be created on."
obj spec.initProvider.clusterConfig.gceClusterConfig.reservationAffinity
"Reservation Affinity for consuming zonal reservation."
fn spec.initProvider.clusterConfig.gceClusterConfig.reservationAffinity.withConsumeReservationType
withConsumeReservationType(consumeReservationType)
"Corresponds to the type of reservation consumption."
fn spec.initProvider.clusterConfig.gceClusterConfig.reservationAffinity.withKey
withKey(key)
"Corresponds to the label key of reservation resource."
fn spec.initProvider.clusterConfig.gceClusterConfig.reservationAffinity.withValues
withValues(values)
"Corresponds to the label values of reservation resource."
fn spec.initProvider.clusterConfig.gceClusterConfig.reservationAffinity.withValuesMixin
withValuesMixin(values)
"Corresponds to the label values of reservation resource."
Note: This function appends passed data to existing values
obj spec.initProvider.clusterConfig.gceClusterConfig.shieldedInstanceConfig
"Shielded Instance Config for clusters using Compute Engine Shielded VMs."
fn spec.initProvider.clusterConfig.gceClusterConfig.shieldedInstanceConfig.withEnableIntegrityMonitoring
withEnableIntegrityMonitoring(enableIntegrityMonitoring)
"Defines whether instances have integrity monitoring enabled."
fn spec.initProvider.clusterConfig.gceClusterConfig.shieldedInstanceConfig.withEnableSecureBoot
withEnableSecureBoot(enableSecureBoot)
"Defines whether instances have Secure Boot enabled."
fn spec.initProvider.clusterConfig.gceClusterConfig.shieldedInstanceConfig.withEnableVtpm
withEnableVtpm(enableVtpm)
"Defines whether instances have the vTPM enabled."
obj spec.initProvider.clusterConfig.initializationAction
"Commands to execute on each node after config is completed. You can specify multiple versions of these. Structure defined below."
fn spec.initProvider.clusterConfig.initializationAction.withScript
withScript(script)
"The script to be executed during initialization of the cluster. The script must be a GCS file with a gs:// prefix."
fn spec.initProvider.clusterConfig.initializationAction.withTimeoutSec
withTimeoutSec(timeoutSec)
"The maximum duration (in seconds) which script is allowed to take to execute its action. GCP will default to a predetermined computed value if not set (currently 300)."
obj spec.initProvider.clusterConfig.lifecycleConfig
"The settings for auto deletion cluster schedule. Structure defined below."
fn spec.initProvider.clusterConfig.lifecycleConfig.withAutoDeleteTime
withAutoDeleteTime(autoDeleteTime)
"The time when cluster will be auto-deleted. A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. Example: \"2014-10-02T15:01:23.045123456Z\"."
fn spec.initProvider.clusterConfig.lifecycleConfig.withIdleDeleteTtl
withIdleDeleteTtl(idleDeleteTtl)
"The duration to keep the cluster alive while idling (no jobs running). After this TTL, the cluster will be deleted. Valid range: [10m, 14d]."
obj spec.initProvider.clusterConfig.masterConfig
"The Google Compute Engine config settings for the master instances in a cluster. Structure defined below."
fn spec.initProvider.clusterConfig.masterConfig.withAccelerators
withAccelerators(accelerators)
"The Compute Engine accelerator (GPU) configuration for these instances. Can be specified multiple times."
fn spec.initProvider.clusterConfig.masterConfig.withAcceleratorsMixin
withAcceleratorsMixin(accelerators)
"The Compute Engine accelerator (GPU) configuration for these instances. Can be specified multiple times."
Note: This function appends passed data to existing values
fn spec.initProvider.clusterConfig.masterConfig.withDiskConfig
withDiskConfig(diskConfig)
"Disk Config"
fn spec.initProvider.clusterConfig.masterConfig.withDiskConfigMixin
withDiskConfigMixin(diskConfig)
"Disk Config"
Note: This function appends passed data to existing values
fn spec.initProvider.clusterConfig.masterConfig.withImageUri
withImageUri(imageUri)
"The URI for the image to use for this worker. See the guide for more information."
fn spec.initProvider.clusterConfig.masterConfig.withMachineType
withMachineType(machineType)
"The name of a Google Compute Engine machine type to create for the master. If not specified, GCP will default to a predetermined computed value (currently n1-standard-4)."
fn spec.initProvider.clusterConfig.masterConfig.withMinCpuPlatform
withMinCpuPlatform(minCpuPlatform)
"The name of a minimum generation of CPU family for the master. If not specified, GCP will default to a predetermined computed value for each zone. See the guide for details about which CPU families are available (and defaulted) for each zone."
fn spec.initProvider.clusterConfig.masterConfig.withNumInstances
withNumInstances(numInstances)
"Specifies the number of master nodes to create. If not specified, GCP will default to a predetermined computed value (currently 1)."
obj spec.initProvider.clusterConfig.masterConfig.accelerators
"The Compute Engine accelerator (GPU) configuration for these instances. Can be specified multiple times."
fn spec.initProvider.clusterConfig.masterConfig.accelerators.withAcceleratorCount
withAcceleratorCount(acceleratorCount)
"The number of the accelerator cards of this type exposed to this instance. Often restricted to one of 1, 2, 4, or 8."
fn spec.initProvider.clusterConfig.masterConfig.accelerators.withAcceleratorType
withAcceleratorType(acceleratorType)
"The short name of the accelerator type to expose to this instance. For example, nvidia-tesla-k80."
obj spec.initProvider.clusterConfig.masterConfig.diskConfig
"Disk Config"
fn spec.initProvider.clusterConfig.masterConfig.diskConfig.withBootDiskSizeGb
withBootDiskSizeGb(bootDiskSizeGb)
"Size of the primary disk attached to each node, specified in GB. The primary disk contains the boot volume and system libraries, and the smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories."
fn spec.initProvider.clusterConfig.masterConfig.diskConfig.withBootDiskType
withBootDiskType(bootDiskType)
"The disk type of the primary disk attached to each node. One of \"pd-ssd\" or \"pd-standard\". Defaults to \"pd-standard\"."
fn spec.initProvider.clusterConfig.masterConfig.diskConfig.withNumLocalSsds
withNumLocalSsds(numLocalSsds)
"The amount of local SSD disks that will be attached to each master cluster node. Defaults to 0."
obj spec.initProvider.clusterConfig.metastoreConfig
"The config setting for metastore service with the cluster. Structure defined below."
fn spec.initProvider.clusterConfig.metastoreConfig.withDataprocMetastoreService
withDataprocMetastoreService(dataprocMetastoreService)
"Resource name of an existing Dataproc Metastore service."
obj spec.initProvider.clusterConfig.preemptibleWorkerConfig
"The Google Compute Engine config settings for the additional instances in a cluster. Structure defined below."
fn spec.initProvider.clusterConfig.preemptibleWorkerConfig.withDiskConfig
withDiskConfig(diskConfig)
"Disk Config"
fn spec.initProvider.clusterConfig.preemptibleWorkerConfig.withDiskConfigMixin
withDiskConfigMixin(diskConfig)
"Disk Config"
Note: This function appends passed data to existing values
fn spec.initProvider.clusterConfig.preemptibleWorkerConfig.withNumInstances
withNumInstances(numInstances)
"Specifies the number of preemptible nodes to create. Defaults to 0."
fn spec.initProvider.clusterConfig.preemptibleWorkerConfig.withPreemptibility
withPreemptibility(preemptibility)
"Specifies the preemptibility of the secondary workers. The default value is PREEMPTIBLE Accepted values are:"
obj spec.initProvider.clusterConfig.preemptibleWorkerConfig.diskConfig
"Disk Config"
fn spec.initProvider.clusterConfig.preemptibleWorkerConfig.diskConfig.withBootDiskSizeGb
withBootDiskSizeGb(bootDiskSizeGb)
"Size of the primary disk attached to each node, specified in GB. The primary disk contains the boot volume and system libraries, and the smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories."
fn spec.initProvider.clusterConfig.preemptibleWorkerConfig.diskConfig.withBootDiskType
withBootDiskType(bootDiskType)
"The disk type of the primary disk attached to each node. One of \"pd-ssd\" or \"pd-standard\". Defaults to \"pd-standard\"."
fn spec.initProvider.clusterConfig.preemptibleWorkerConfig.diskConfig.withNumLocalSsds
withNumLocalSsds(numLocalSsds)
"The amount of local SSD disks that will be attached to each master cluster node. Defaults to 0."
obj spec.initProvider.clusterConfig.securityConfig
"Security related configuration. Structure defined below."
fn spec.initProvider.clusterConfig.securityConfig.withKerberosConfig
withKerberosConfig(kerberosConfig)
"Kerberos Configuration"
fn spec.initProvider.clusterConfig.securityConfig.withKerberosConfigMixin
withKerberosConfigMixin(kerberosConfig)
"Kerberos Configuration"
Note: This function appends passed data to existing values
obj spec.initProvider.clusterConfig.securityConfig.kerberosConfig
"Kerberos Configuration"
fn spec.initProvider.clusterConfig.securityConfig.kerberosConfig.withCrossRealmTrustAdminServer
withCrossRealmTrustAdminServer(crossRealmTrustAdminServer)
"The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship."
fn spec.initProvider.clusterConfig.securityConfig.kerberosConfig.withCrossRealmTrustKdc
withCrossRealmTrustKdc(crossRealmTrustKdc)
"The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship."
fn spec.initProvider.clusterConfig.securityConfig.kerberosConfig.withCrossRealmTrustRealm
withCrossRealmTrustRealm(crossRealmTrustRealm)
"The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust."
fn spec.initProvider.clusterConfig.securityConfig.kerberosConfig.withCrossRealmTrustSharedPasswordUri
withCrossRealmTrustSharedPasswordUri(crossRealmTrustSharedPasswordUri)
"The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship."
fn spec.initProvider.clusterConfig.securityConfig.kerberosConfig.withEnableKerberos
withEnableKerberos(enableKerberos)
"Flag to indicate whether to Kerberize the cluster."
fn spec.initProvider.clusterConfig.securityConfig.kerberosConfig.withKdcDbKeyUri
withKdcDbKeyUri(kdcDbKeyUri)
"The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database."
fn spec.initProvider.clusterConfig.securityConfig.kerberosConfig.withKeyPasswordUri
withKeyPasswordUri(keyPasswordUri)
"The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc."
fn spec.initProvider.clusterConfig.securityConfig.kerberosConfig.withKeystorePasswordUri
withKeystorePasswordUri(keystorePasswordUri)
"The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificated, the password is generated by Dataproc."
fn spec.initProvider.clusterConfig.securityConfig.kerberosConfig.withKeystoreUri
withKeystoreUri(keystoreUri)
"The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate."
fn spec.initProvider.clusterConfig.securityConfig.kerberosConfig.withKmsKeyUri
withKmsKeyUri(kmsKeyUri)
"The URI of the KMS key used to encrypt various sensitive files."
fn spec.initProvider.clusterConfig.securityConfig.kerberosConfig.withRealm
withRealm(realm)
"The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm."
fn spec.initProvider.clusterConfig.securityConfig.kerberosConfig.withRootPrincipalPasswordUri
withRootPrincipalPasswordUri(rootPrincipalPasswordUri)
"The Cloud Storage URI of a KMS encrypted file containing the root principal password."
fn spec.initProvider.clusterConfig.securityConfig.kerberosConfig.withTgtLifetimeHours
withTgtLifetimeHours(tgtLifetimeHours)
"The lifetime of the ticket granting ticket, in hours."
fn spec.initProvider.clusterConfig.securityConfig.kerberosConfig.withTruststorePasswordUri
withTruststorePasswordUri(truststorePasswordUri)
"The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc."
fn spec.initProvider.clusterConfig.securityConfig.kerberosConfig.withTruststoreUri
withTruststoreUri(truststoreUri)
"The Cloud Storage URI of the truststore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate."
obj spec.initProvider.clusterConfig.softwareConfig
"The config settings for software inside the cluster. Structure defined below."
fn spec.initProvider.clusterConfig.softwareConfig.withImageVersion
withImageVersion(imageVersion)
"The Cloud Dataproc image version to use for the cluster - this controls the sets of software versions installed onto the nodes when you create clusters. If not specified, defaults to the latest version. For a list of valid versions see Cloud Dataproc versions"
fn spec.initProvider.clusterConfig.softwareConfig.withOptionalComponents
withOptionalComponents(optionalComponents)
"The set of optional components to activate on the cluster. See Available Optional Components."
fn spec.initProvider.clusterConfig.softwareConfig.withOptionalComponentsMixin
withOptionalComponentsMixin(optionalComponents)
"The set of optional components to activate on the cluster. See Available Optional Components."
Note: This function appends passed data to existing values
fn spec.initProvider.clusterConfig.softwareConfig.withOverrideProperties
withOverrideProperties(overrideProperties)
"A list of override and additional properties (key/value pairs) used to modify various aspects of the common configuration files used when creating a cluster. For a list of valid properties please see Cluster properties"
fn spec.initProvider.clusterConfig.softwareConfig.withOverridePropertiesMixin
withOverridePropertiesMixin(overrideProperties)
"A list of override and additional properties (key/value pairs) used to modify various aspects of the common configuration files used when creating a cluster. For a list of valid properties please see Cluster properties"
Note: This function appends passed data to existing values
obj spec.initProvider.clusterConfig.workerConfig
"The Google Compute Engine config settings for the worker instances in a cluster. Structure defined below."
fn spec.initProvider.clusterConfig.workerConfig.withAccelerators
withAccelerators(accelerators)
"The Compute Engine accelerator configuration for these instances. Can be specified multiple times."
fn spec.initProvider.clusterConfig.workerConfig.withAcceleratorsMixin
withAcceleratorsMixin(accelerators)
"The Compute Engine accelerator configuration for these instances. Can be specified multiple times."
Note: This function appends passed data to existing values
fn spec.initProvider.clusterConfig.workerConfig.withDiskConfig
withDiskConfig(diskConfig)
"Disk Config"
fn spec.initProvider.clusterConfig.workerConfig.withDiskConfigMixin
withDiskConfigMixin(diskConfig)
"Disk Config"
Note: This function appends passed data to existing values
fn spec.initProvider.clusterConfig.workerConfig.withImageUri
withImageUri(imageUri)
"The URI for the image to use for this worker. See the guide for more information."
fn spec.initProvider.clusterConfig.workerConfig.withMachineType
withMachineType(machineType)
"The name of a Google Compute Engine machine type to create for the worker nodes. If not specified, GCP will default to a predetermined computed value (currently n1-standard-4)."
fn spec.initProvider.clusterConfig.workerConfig.withMinCpuPlatform
withMinCpuPlatform(minCpuPlatform)
"The name of a minimum generation of CPU family for the master. If not specified, GCP will default to a predetermined computed value for each zone. See the guide for details about which CPU families are available (and defaulted) for each zone."
fn spec.initProvider.clusterConfig.workerConfig.withNumInstances
withNumInstances(numInstances)
"Specifies the number of worker nodes to create. If not specified, GCP will default to a predetermined computed value (currently 2). There is currently a beta feature which allows you to run a Single Node Cluster. In order to take advantage of this you need to set \"dataproc:dataproc.allow.zero.workers\" = \"true\" in cluster_config.software_config.properties"
obj spec.initProvider.clusterConfig.workerConfig.accelerators
"The Compute Engine accelerator configuration for these instances. Can be specified multiple times."
fn spec.initProvider.clusterConfig.workerConfig.accelerators.withAcceleratorCount
withAcceleratorCount(acceleratorCount)
"The number of the accelerator cards of this type exposed to this instance. Often restricted to one of 1, 2, 4, or 8."
fn spec.initProvider.clusterConfig.workerConfig.accelerators.withAcceleratorType
withAcceleratorType(acceleratorType)
"The short name of the accelerator type to expose to this instance. For example, nvidia-tesla-k80."
obj spec.initProvider.clusterConfig.workerConfig.diskConfig
"Disk Config"
fn spec.initProvider.clusterConfig.workerConfig.diskConfig.withBootDiskSizeGb
withBootDiskSizeGb(bootDiskSizeGb)
"Size of the primary disk attached to each node, specified in GB. The primary disk contains the boot volume and system libraries, and the smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories."
fn spec.initProvider.clusterConfig.workerConfig.diskConfig.withBootDiskType
withBootDiskType(bootDiskType)
"The disk type of the primary disk attached to each node. One of \"pd-ssd\" or \"pd-standard\". Defaults to \"pd-standard\"."
fn spec.initProvider.clusterConfig.workerConfig.diskConfig.withNumLocalSsds
withNumLocalSsds(numLocalSsds)
"The amount of local SSD disks that will be attached to each master cluster node. Defaults to 0."
obj spec.initProvider.virtualClusterConfig
"Allows you to configure a virtual Dataproc on GKE cluster. Structure defined below."
fn spec.initProvider.virtualClusterConfig.withAuxiliaryServicesConfig
withAuxiliaryServicesConfig(auxiliaryServicesConfig)
"Configuration of auxiliary services used by this cluster. Structure defined below."
fn spec.initProvider.virtualClusterConfig.withAuxiliaryServicesConfigMixin
withAuxiliaryServicesConfigMixin(auxiliaryServicesConfig)
"Configuration of auxiliary services used by this cluster. Structure defined below."
Note: This function appends passed data to existing values
fn spec.initProvider.virtualClusterConfig.withKubernetesClusterConfig
withKubernetesClusterConfig(kubernetesClusterConfig)
"The configuration for running the Dataproc cluster on Kubernetes. Structure defined below."
fn spec.initProvider.virtualClusterConfig.withKubernetesClusterConfigMixin
withKubernetesClusterConfigMixin(kubernetesClusterConfig)
"The configuration for running the Dataproc cluster on Kubernetes. Structure defined below."
Note: This function appends passed data to existing values
fn spec.initProvider.virtualClusterConfig.withStagingBucket
withStagingBucket(stagingBucket)
"The Cloud Storage staging bucket used to stage files, such as Hadoop jars, between client machines and the cluster. Note: If you don't explicitly specify a staging_bucket then GCP will auto create / assign one for you. However, you are not guaranteed an auto generated bucket which is solely dedicated to your cluster; it may be shared with other clusters in the same region/zone also choosing to use the auto generation option."
obj spec.initProvider.virtualClusterConfig.auxiliaryServicesConfig
"Configuration of auxiliary services used by this cluster. Structure defined below."
fn spec.initProvider.virtualClusterConfig.auxiliaryServicesConfig.withMetastoreConfig
withMetastoreConfig(metastoreConfig)
"The config setting for metastore service with the cluster. Structure defined below."
fn spec.initProvider.virtualClusterConfig.auxiliaryServicesConfig.withMetastoreConfigMixin
withMetastoreConfigMixin(metastoreConfig)
"The config setting for metastore service with the cluster. Structure defined below."
Note: This function appends passed data to existing values
fn spec.initProvider.virtualClusterConfig.auxiliaryServicesConfig.withSparkHistoryServerConfig
withSparkHistoryServerConfig(sparkHistoryServerConfig)
"The Spark History Server configuration for the workload."
fn spec.initProvider.virtualClusterConfig.auxiliaryServicesConfig.withSparkHistoryServerConfigMixin
withSparkHistoryServerConfigMixin(sparkHistoryServerConfig)
"The Spark History Server configuration for the workload."
Note: This function appends passed data to existing values
obj spec.initProvider.virtualClusterConfig.auxiliaryServicesConfig.metastoreConfig
"The config setting for metastore service with the cluster. Structure defined below."
fn spec.initProvider.virtualClusterConfig.auxiliaryServicesConfig.metastoreConfig.withDataprocMetastoreService
withDataprocMetastoreService(dataprocMetastoreService)
"Resource name of an existing Dataproc Metastore service."
obj spec.initProvider.virtualClusterConfig.auxiliaryServicesConfig.sparkHistoryServerConfig
"The Spark History Server configuration for the workload."
fn spec.initProvider.virtualClusterConfig.auxiliaryServicesConfig.sparkHistoryServerConfig.withDataprocCluster
withDataprocCluster(dataprocCluster)
"Resource name of an existing Dataproc Cluster to act as a Spark History Server for the workload."
obj spec.initProvider.virtualClusterConfig.kubernetesClusterConfig
"The configuration for running the Dataproc cluster on Kubernetes. Structure defined below."
fn spec.initProvider.virtualClusterConfig.kubernetesClusterConfig.withGkeClusterConfig
withGkeClusterConfig(gkeClusterConfig)
"The configuration for running the Dataproc cluster on GKE."
fn spec.initProvider.virtualClusterConfig.kubernetesClusterConfig.withGkeClusterConfigMixin
withGkeClusterConfigMixin(gkeClusterConfig)
"The configuration for running the Dataproc cluster on GKE."
Note: This function appends passed data to existing values
fn spec.initProvider.virtualClusterConfig.kubernetesClusterConfig.withKubernetesNamespace
withKubernetesNamespace(kubernetesNamespace)
"A namespace within the Kubernetes cluster to deploy into. If this namespace does not exist, it is created. If it exists, Dataproc verifies that another Dataproc VirtualCluster is not installed into it. If not specified, the name of the Dataproc Cluster is used."
fn spec.initProvider.virtualClusterConfig.kubernetesClusterConfig.withKubernetesSoftwareConfig
withKubernetesSoftwareConfig(kubernetesSoftwareConfig)
"The software configuration for this Dataproc cluster running on Kubernetes."
fn spec.initProvider.virtualClusterConfig.kubernetesClusterConfig.withKubernetesSoftwareConfigMixin
withKubernetesSoftwareConfigMixin(kubernetesSoftwareConfig)
"The software configuration for this Dataproc cluster running on Kubernetes."
Note: This function appends passed data to existing values
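Example: a sketch of the Kubernetes-level wiring; gkeCfg and softwareCfg are placeholders standing in for objects assembled with the gkeClusterConfig and kubernetesSoftwareConfig builders documented below (import path and namespace are assumptions):
  local dataproc = import 'provider-gcp/main.libsonnet';  // hypothetical import path
  local kube = dataproc.v1beta1.cluster.spec.initProvider.virtualClusterConfig.kubernetesClusterConfig;

  local gkeCfg = {};       // placeholder; see the gkeClusterConfig sketch below
  local softwareCfg = {};  // placeholder; see the kubernetesSoftwareConfig sketch below

  kube.withKubernetesNamespace('dataproc-ns')  // created if it does not exist
  + kube.withGkeClusterConfig(gkeCfg)
  + kube.withKubernetesSoftwareConfig(softwareCfg)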
obj spec.initProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig
"The configuration for running the Dataproc cluster on GKE."
fn spec.initProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.withGkeClusterTarget
withGkeClusterTarget(gkeClusterTarget)
"A target GKE cluster to deploy to. It must be in the same project and region as the Dataproc cluster (the GKE cluster can be zonal or regional)"
fn spec.initProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.withNodePoolTarget
withNodePoolTarget(nodePoolTarget)
"GKE node pools where workloads will be scheduled. At least one node pool must be assigned the DEFAULT GkeNodePoolTarget.Role. If a GkeNodePoolTarget is not specified, Dataproc constructs a DEFAULT GkeNodePoolTarget. Each role can be given to only one GkeNodePoolTarget. All node pools must have the same location settings."
fn spec.initProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.withNodePoolTargetMixin
withNodePoolTargetMixin(nodePoolTarget)
"GKE node pools where workloads will be scheduled. At least one node pool must be assigned the DEFAULT GkeNodePoolTarget.Role. If a GkeNodePoolTarget is not specified, Dataproc constructs a DEFAULT GkeNodePoolTarget. Each role can be given to only one GkeNodePoolTarget. All node pools must have the same location settings."
Note: This function appends passed data to existing values
obj spec.initProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.nodePoolTarget
"GKE node pools where workloads will be scheduled. At least one node pool must be assigned the DEFAULT GkeNodePoolTarget.Role. If a GkeNodePoolTarget is not specified, Dataproc constructs a DEFAULT GkeNodePoolTarget. Each role can be given to only one GkeNodePoolTarget. All node pools must have the same location settings."
fn spec.initProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.nodePoolTarget.withNodePool
withNodePool(nodePool)
"The target GKE node pool."
fn spec.initProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.nodePoolTarget.withNodePoolConfig
withNodePoolConfig(nodePoolConfig)
"(Input only) The configuration for the GKE node pool. If specified, Dataproc attempts to create a node pool with the specified shape. If one with the same name already exists, it is verified against all specified fields. If a field differs, the virtual cluster creation will fail."
fn spec.initProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.nodePoolTarget.withNodePoolConfigMixin
withNodePoolConfigMixin(nodePoolConfig)
"(Input only) The configuration for the GKE node pool. If specified, Dataproc attempts to create a node pool with the specified shape. If one with the same name already exists, it is verified against all specified fields. If a field differs, the virtual cluster creation will fail."
Note: This function appends passed data to existing values
fn spec.initProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.nodePoolTarget.withRoles
withRoles(roles)
"The roles associated with the GKE node pool. One of \"DEFAULT\", \"CONTROLLER\", \"SPARK_DRIVER\" or \"SPARK_EXECUTOR\"."
fn spec.initProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.nodePoolTarget.withRolesMixin
withRolesMixin(roles)
"The roles associated with the GKE node pool. One of \"DEFAULT\", \"CONTROLLER\", \"SPARK_DRIVER\" or \"SPARK_EXECUTOR\"."
Note: This function appends passed data to existing values
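Example: a sketch targeting an existing GKE cluster and assigning the required DEFAULT role to one node pool (import path and resource names are hypothetical):
  local dataproc = import 'provider-gcp/main.libsonnet';  // hypothetical import path
  local gke = dataproc.v1beta1.cluster.spec.initProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig;

  gke.withGkeClusterTarget('projects/my-project/locations/us-central1/clusters/my-gke')  // hypothetical
  + gke.withNodePoolTarget([
      gke.nodePoolTarget.withNodePool('my-dataproc-pool')  // hypothetical pool name
      + gke.nodePoolTarget.withRoles(['DEFAULT']),         // at least one pool must carry DEFAULT
    ])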
obj spec.initProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.nodePoolTarget.nodePoolConfig
"(Input only) The configuration for the GKE node pool. If specified, Dataproc attempts to create a node pool with the specified shape. If one with the same name already exists, it is verified against all specified fields. If a field differs, the virtual cluster creation will fail."
fn spec.initProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.nodePoolTarget.nodePoolConfig.withAutoscaling
withAutoscaling(autoscaling)
"The autoscaler configuration for this node pool. The autoscaler is enabled only when a valid configuration is present."
fn spec.initProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.nodePoolTarget.nodePoolConfig.withAutoscalingMixin
withAutoscalingMixin(autoscaling)
"The autoscaler configuration for this node pool. The autoscaler is enabled only when a valid configuration is present."
Note: This function appends passed data to existing values
fn spec.initProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.nodePoolTarget.nodePoolConfig.withConfig
withConfig(config)
"The node pool configuration."
fn spec.initProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.nodePoolTarget.nodePoolConfig.withConfigMixin
withConfigMixin(config)
"The node pool configuration."
Note: This function appends passed data to existing values
fn spec.initProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.nodePoolTarget.nodePoolConfig.withLocations
withLocations(locations)
"The list of Compute Engine zones where node pool nodes associated with a Dataproc on GKE virtual cluster will be located."
fn spec.initProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.nodePoolTarget.nodePoolConfig.withLocationsMixin
withLocationsMixin(locations)
"The list of Compute Engine zones where node pool nodes associated with a Dataproc on GKE virtual cluster will be located."
Note: This function appends passed data to existing values
obj spec.initProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.nodePoolTarget.nodePoolConfig.autoscaling
"The autoscaler configuration for this node pool. The autoscaler is enabled only when a valid configuration is present."
fn spec.initProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.nodePoolTarget.nodePoolConfig.autoscaling.withMaxNodeCount
withMaxNodeCount(maxNodeCount)
"The maximum number of nodes in the node pool. Must be >= minNodeCount, and must be > 0."
fn spec.initProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.nodePoolTarget.nodePoolConfig.autoscaling.withMinNodeCount
withMinNodeCount(minNodeCount)
"The minimum number of nodes in the node pool. Must be >= 0 and <= maxNodeCount."
obj spec.initProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.nodePoolTarget.nodePoolConfig.config
"The node pool configuration."
fn spec.initProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.nodePoolTarget.nodePoolConfig.config.withLocalSsdCount
withLocalSsdCount(localSsdCount)
"The number of local SSD disks to attach to the node, which is limited by the maximum number of disks allowable per zone."
fn spec.initProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.nodePoolTarget.nodePoolConfig.config.withMachineType
withMachineType(machineType)
"The name of a Compute Engine machine type."
fn spec.initProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.nodePoolTarget.nodePoolConfig.config.withMinCpuPlatform
withMinCpuPlatform(minCpuPlatform)
"Minimum CPU platform to be used by this instance. The instance may be scheduled on the specified or a newer CPU platform. Specify the friendly names of CPU platforms, such as \"Intel Haswell\" or \"Intel Sandy Bridge\"."
fn spec.initProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.nodePoolTarget.nodePoolConfig.config.withPreemptible
withPreemptible(preemptible)
"Whether the nodes are created as preemptible VM instances. Preemptible nodes cannot be used in a node pool with the CONTROLLER role or in the DEFAULT node pool if the CONTROLLER role is not assigned (the DEFAULT node pool will assume the CONTROLLER role)."
fn spec.initProvider.virtualClusterConfig.kubernetesClusterConfig.gkeClusterConfig.nodePoolTarget.nodePoolConfig.config.withSpot
withSpot(spot)
"Spot flag for enabling Spot VM, which is a rebrand of the existing preemptible flag."
obj spec.initProvider.virtualClusterConfig.kubernetesClusterConfig.kubernetesSoftwareConfig
"The software configuration for this Dataproc cluster running on Kubernetes."
fn spec.initProvider.virtualClusterConfig.kubernetesClusterConfig.kubernetesSoftwareConfig.withComponentVersion
withComponentVersion(componentVersion)
"The components that should be installed in this Dataproc cluster. The key must be a string from the KubernetesComponent enumeration. The value is the version of the software to be installed. At least one entry must be specified."
fn spec.initProvider.virtualClusterConfig.kubernetesClusterConfig.kubernetesSoftwareConfig.withComponentVersionMixin
withComponentVersionMixin(componentVersion)
"The components that should be installed in this Dataproc cluster. The key must be a string from the KubernetesComponent enumeration. The value is the version of the software to be installed. At least one entry must be specified."
Note: This function appends passed data to existing values
fn spec.initProvider.virtualClusterConfig.kubernetesClusterConfig.kubernetesSoftwareConfig.withProperties
withProperties(properties)
"The properties to set on daemon config files. Property keys are specified in prefix:property format, for example spark:spark.kubernetes.container.image."
fn spec.initProvider.virtualClusterConfig.kubernetesClusterConfig.kubernetesSoftwareConfig.withPropertiesMixin
withPropertiesMixin(properties)
"The properties to set on daemon config files. Property keys are specified in prefix:property format, for example spark:spark.kubernetes.container.image."
Note: This function appends passed data to existing values
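Example: a sketch declaring the required component version plus a daemon property in prefix:property form (import path, version, and image are hypothetical):
  local dataproc = import 'provider-gcp/main.libsonnet';  // hypothetical import path
  local ksc = dataproc.v1beta1.cluster.spec.initProvider.virtualClusterConfig.kubernetesClusterConfig.kubernetesSoftwareConfig;

  ksc.withComponentVersion({ SPARK: '3.1-dataproc-7' })  // hypothetical version; at least one entry required
  + ksc.withProperties({
      'spark:spark.kubernetes.container.image': 'gcr.io/my-project/spark:latest',  // hypothetical image
    })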
obj spec.providerConfigRef
"ProviderConfigReference specifies how the provider that will be used to create, observe, update, and delete this managed resource should be configured."
fn spec.providerConfigRef.withName
withName(name)
"Name of the referenced object."
obj spec.providerConfigRef.policy
"Policies for referencing."
fn spec.providerConfigRef.policy.withResolution
withResolution(resolution)
"Resolution specifies whether resolution of this reference is required. The default is 'Required', which means the reconcile will fail if the reference cannot be resolved. 'Optional' means this reference will be a no-op if it cannot be resolved."
fn spec.providerConfigRef.policy.withResolve
withResolve(resolve)
"Resolve specifies when this reference should be resolved. The default is 'IfNotPresent', which will attempt to resolve the reference only when the corresponding field is not present. Use 'Always' to resolve the reference on every reconcile."
obj spec.providerRef
"ProviderReference specifies the provider that will be used to create, observe, update, and delete this managed resource. Deprecated: Please use ProviderConfigReference, i.e. providerConfigRef
"
fn spec.providerRef.withName
withName(name)
"Name of the referenced object."
obj spec.providerRef.policy
"Policies for referencing."
fn spec.providerRef.policy.withResolution
withResolution(resolution)
"Resolution specifies whether resolution of this reference is required. The default is 'Required', which means the reconcile will fail if the reference cannot be resolved. 'Optional' means this reference will be a no-op if it cannot be resolved."
fn spec.providerRef.policy.withResolve
withResolve(resolve)
"Resolve specifies when this reference should be resolved. The default is 'IfNotPresent', which will attempt to resolve the reference only when the corresponding field is not present. Use 'Always' to resolve the reference on every reconcile."
obj spec.publishConnectionDetailsTo
"PublishConnectionDetailsTo specifies the connection secret config which contains a name, metadata and a reference to secret store config to which any connection details for this managed resource should be written. Connection details frequently include the endpoint, username, and password required to connect to the managed resource."
fn spec.publishConnectionDetailsTo.withName
withName(name)
"Name is the name of the connection secret."
obj spec.publishConnectionDetailsTo.configRef
"SecretStoreConfigRef specifies which secret store config should be used for this ConnectionSecret."
fn spec.publishConnectionDetailsTo.configRef.withName
withName(name)
"Name of the referenced object."
obj spec.publishConnectionDetailsTo.configRef.policy
"Policies for referencing."
fn spec.publishConnectionDetailsTo.configRef.policy.withResolution
withResolution(resolution)
"Resolution specifies whether resolution of this reference is required. The default is 'Required', which means the reconcile will fail if the reference cannot be resolved. 'Optional' means this reference will be a no-op if it cannot be resolved."
fn spec.publishConnectionDetailsTo.configRef.policy.withResolve
withResolve(resolve)
"Resolve specifies when this reference should be resolved. The default is 'IfNotPresent', which will attempt to resolve the reference only when the corresponding field is not present. Use 'Always' to resolve the reference on every reconcile."
obj spec.publishConnectionDetailsTo.metadata
"Metadata is the metadata for connection secret."
fn spec.publishConnectionDetailsTo.metadata.withAnnotations
withAnnotations(annotations)
"Annotations are the annotations to be added to connection secret. - For Kubernetes secrets, this will be used as \"metadata.annotations\". - It is up to Secret Store implementation for others store types."
fn spec.publishConnectionDetailsTo.metadata.withAnnotationsMixin
withAnnotationsMixin(annotations)
"Annotations are the annotations to be added to connection secret. - For Kubernetes secrets, this will be used as \"metadata.annotations\". - It is up to Secret Store implementation for others store types."
Note: This function appends passed data to existing values
fn spec.publishConnectionDetailsTo.metadata.withLabels
withLabels(labels)
"Labels are the labels/tags to be added to connection secret. - For Kubernetes secrets, this will be used as \"metadata.labels\". - It is up to Secret Store implementation for others store types."
fn spec.publishConnectionDetailsTo.metadata.withLabelsMixin
withLabelsMixin(labels)
"Labels are the labels/tags to be added to connection secret. - For Kubernetes secrets, this will be used as \"metadata.labels\". - It is up to Secret Store implementation for others store types."
Note: This function appends passed data to existing values
fn spec.publishConnectionDetailsTo.metadata.withType
withType(type)
"Type is the SecretType for the connection secret. - Only valid for Kubernetes Secret Stores."
obj spec.writeConnectionSecretToRef
"WriteConnectionSecretToReference specifies the namespace and name of a Secret to which any connection details for this managed resource should be written. Connection details frequently include the endpoint, username, and password required to connect to the managed resource. This field is planned to be replaced in a future release in favor of PublishConnectionDetailsTo. Currently, both could be set independently and connection details would be published to both without affecting each other."
fn spec.writeConnectionSecretToRef.withName
withName(name)
"Name of the secret."
fn spec.writeConnectionSecretToRef.withNamespace
withNamespace(namespace)
"Namespace of the secret."