bigquery.v1beta1.job
"Job is the Schema for the Jobs API. Jobs are actions that BigQuery runs on your behalf to load data, export data, query data, or copy data."
Index
fn new(name)
obj metadata
fn withAnnotations(annotations)
fn withAnnotationsMixin(annotations)
fn withClusterName(clusterName)
fn withCreationTimestamp(creationTimestamp)
fn withDeletionGracePeriodSeconds(deletionGracePeriodSeconds)
fn withDeletionTimestamp(deletionTimestamp)
fn withFinalizers(finalizers)
fn withFinalizersMixin(finalizers)
fn withGenerateName(generateName)
fn withGeneration(generation)
fn withLabels(labels)
fn withLabelsMixin(labels)
fn withName(name)
fn withNamespace(namespace)
fn withOwnerReferences(ownerReferences)
fn withOwnerReferencesMixin(ownerReferences)
fn withResourceVersion(resourceVersion)
fn withSelfLink(selfLink)
fn withUid(uid)
obj spec
fn withDeletionPolicy(deletionPolicy)
fn withManagementPolicies(managementPolicies)
fn withManagementPoliciesMixin(managementPolicies)
obj spec.forProvider
fn withCopy(copy)
fn withCopyMixin(copy)
fn withExtract(extract)
fn withExtractMixin(extract)
fn withJobId(jobId)
fn withJobTimeoutMs(jobTimeoutMs)
fn withLabels(labels)
fn withLabelsMixin(labels)
fn withLoad(load)
fn withLoadMixin(load)
fn withLocation(location)
fn withProject(project)
fn withQuery(query)
fn withQueryMixin(query)
obj spec.forProvider.copy
fn withCreateDisposition(createDisposition)
fn withDestinationEncryptionConfiguration(destinationEncryptionConfiguration)
fn withDestinationEncryptionConfigurationMixin(destinationEncryptionConfiguration)
fn withDestinationTable(destinationTable)
fn withDestinationTableMixin(destinationTable)
fn withSourceTables(sourceTables)
fn withSourceTablesMixin(sourceTables)
fn withWriteDisposition(writeDisposition)
obj spec.forProvider.copy.destinationEncryptionConfiguration
obj spec.forProvider.copy.destinationTable
fn withDatasetId(datasetId)
fn withProjectId(projectId)
fn withTableId(tableId)
obj spec.forProvider.copy.destinationTable.datasetIdRef
obj spec.forProvider.copy.destinationTable.datasetIdSelector
obj spec.forProvider.copy.destinationTable.tableIdRef
obj spec.forProvider.copy.destinationTable.tableIdSelector
obj spec.forProvider.copy.sourceTables
obj spec.forProvider.extract
fn withCompression(compression)
fn withDestinationFormat(destinationFormat)
fn withDestinationUris(destinationUris)
fn withDestinationUrisMixin(destinationUris)
fn withFieldDelimiter(fieldDelimiter)
fn withPrintHeader(printHeader)
fn withSourceModel(sourceModel)
fn withSourceModelMixin(sourceModel)
fn withSourceTable(sourceTable)
fn withSourceTableMixin(sourceTable)
fn withUseAvroLogicalTypes(useAvroLogicalTypes)
obj spec.forProvider.extract.sourceModel
obj spec.forProvider.extract.sourceTable
fn withDatasetId(datasetId)
fn withProjectId(projectId)
fn withTableId(tableId)
obj spec.forProvider.extract.sourceTable.datasetIdRef
obj spec.forProvider.extract.sourceTable.datasetIdSelector
obj spec.forProvider.extract.sourceTable.tableIdRef
obj spec.forProvider.extract.sourceTable.tableIdSelector
obj spec.forProvider.load
fn withAllowJaggedRows(allowJaggedRows)
fn withAllowQuotedNewlines(allowQuotedNewlines)
fn withAutodetect(autodetect)
fn withCreateDisposition(createDisposition)
fn withDestinationEncryptionConfiguration(destinationEncryptionConfiguration)
fn withDestinationEncryptionConfigurationMixin(destinationEncryptionConfiguration)
fn withDestinationTable(destinationTable)
fn withDestinationTableMixin(destinationTable)
fn withEncoding(encoding)
fn withFieldDelimiter(fieldDelimiter)
fn withIgnoreUnknownValues(ignoreUnknownValues)
fn withJsonExtension(jsonExtension)
fn withMaxBadRecords(maxBadRecords)
fn withNullMarker(nullMarker)
fn withParquetOptions(parquetOptions)
fn withParquetOptionsMixin(parquetOptions)
fn withProjectionFields(projectionFields)
fn withProjectionFieldsMixin(projectionFields)
fn withQuote(quote)
fn withSchemaUpdateOptions(schemaUpdateOptions)
fn withSchemaUpdateOptionsMixin(schemaUpdateOptions)
fn withSkipLeadingRows(skipLeadingRows)
fn withSourceFormat(sourceFormat)
fn withSourceUris(sourceUris)
fn withSourceUrisMixin(sourceUris)
fn withTimePartitioning(timePartitioning)
fn withTimePartitioningMixin(timePartitioning)
fn withWriteDisposition(writeDisposition)
obj spec.forProvider.load.destinationEncryptionConfiguration
obj spec.forProvider.load.destinationTable
fn withDatasetId(datasetId)
fn withProjectId(projectId)
fn withTableId(tableId)
obj spec.forProvider.load.destinationTable.datasetIdRef
obj spec.forProvider.load.destinationTable.datasetIdSelector
obj spec.forProvider.load.destinationTable.tableIdRef
obj spec.forProvider.load.destinationTable.tableIdSelector
obj spec.forProvider.load.parquetOptions
obj spec.forProvider.load.timePartitioning
obj spec.forProvider.query
fn withAllowLargeResults(allowLargeResults)
fn withCreateDisposition(createDisposition)
fn withDefaultDataset(defaultDataset)
fn withDefaultDatasetMixin(defaultDataset)
fn withDestinationEncryptionConfiguration(destinationEncryptionConfiguration)
fn withDestinationEncryptionConfigurationMixin(destinationEncryptionConfiguration)
fn withDestinationTable(destinationTable)
fn withDestinationTableMixin(destinationTable)
fn withFlattenResults(flattenResults)
fn withMaximumBillingTier(maximumBillingTier)
fn withMaximumBytesBilled(maximumBytesBilled)
fn withParameterMode(parameterMode)
fn withPriority(priority)
fn withQuery(query)
fn withSchemaUpdateOptions(schemaUpdateOptions)
fn withSchemaUpdateOptionsMixin(schemaUpdateOptions)
fn withScriptOptions(scriptOptions)
fn withScriptOptionsMixin(scriptOptions)
fn withUseLegacySql(useLegacySql)
fn withUseQueryCache(useQueryCache)
fn withUserDefinedFunctionResources(userDefinedFunctionResources)
fn withUserDefinedFunctionResourcesMixin(userDefinedFunctionResources)
fn withWriteDisposition(writeDisposition)
obj spec.forProvider.query.defaultDataset
obj spec.forProvider.query.destinationEncryptionConfiguration
obj spec.forProvider.query.destinationTable
fn withDatasetId(datasetId)
fn withProjectId(projectId)
fn withTableId(tableId)
obj spec.forProvider.query.destinationTable.datasetIdRef
obj spec.forProvider.query.destinationTable.datasetIdSelector
obj spec.forProvider.query.destinationTable.tableIdRef
obj spec.forProvider.query.destinationTable.tableIdSelector
obj spec.forProvider.query.scriptOptions
obj spec.forProvider.query.userDefinedFunctionResources
obj spec.initProvider
fn withCopy(copy)
fn withCopyMixin(copy)
fn withExtract(extract)
fn withExtractMixin(extract)
fn withJobId(jobId)
fn withJobTimeoutMs(jobTimeoutMs)
fn withLabels(labels)
fn withLabelsMixin(labels)
fn withLoad(load)
fn withLoadMixin(load)
fn withLocation(location)
fn withProject(project)
fn withQuery(query)
fn withQueryMixin(query)
obj spec.initProvider.copy
fn withCreateDisposition(createDisposition)
fn withDestinationEncryptionConfiguration(destinationEncryptionConfiguration)
fn withDestinationEncryptionConfigurationMixin(destinationEncryptionConfiguration)
fn withDestinationTable(destinationTable)
fn withDestinationTableMixin(destinationTable)
fn withSourceTables(sourceTables)
fn withSourceTablesMixin(sourceTables)
fn withWriteDisposition(writeDisposition)
obj spec.initProvider.copy.destinationTable
obj spec.initProvider.copy.sourceTables
obj spec.initProvider.extract
fn withCompression(compression)
fn withDestinationFormat(destinationFormat)
fn withDestinationUris(destinationUris)
fn withDestinationUrisMixin(destinationUris)
fn withFieldDelimiter(fieldDelimiter)
fn withPrintHeader(printHeader)
fn withSourceModel(sourceModel)
fn withSourceModelMixin(sourceModel)
fn withSourceTable(sourceTable)
fn withSourceTableMixin(sourceTable)
fn withUseAvroLogicalTypes(useAvroLogicalTypes)
obj spec.initProvider.extract.sourceModel
obj spec.initProvider.extract.sourceTable
obj spec.initProvider.load
fn withAllowJaggedRows(allowJaggedRows)
fn withAllowQuotedNewlines(allowQuotedNewlines)
fn withAutodetect(autodetect)
fn withCreateDisposition(createDisposition)
fn withDestinationEncryptionConfiguration(destinationEncryptionConfiguration)
fn withDestinationEncryptionConfigurationMixin(destinationEncryptionConfiguration)
fn withDestinationTable(destinationTable)
fn withDestinationTableMixin(destinationTable)
fn withEncoding(encoding)
fn withFieldDelimiter(fieldDelimiter)
fn withIgnoreUnknownValues(ignoreUnknownValues)
fn withJsonExtension(jsonExtension)
fn withMaxBadRecords(maxBadRecords)
fn withNullMarker(nullMarker)
fn withParquetOptions(parquetOptions)
fn withParquetOptionsMixin(parquetOptions)
fn withProjectionFields(projectionFields)
fn withProjectionFieldsMixin(projectionFields)
fn withQuote(quote)
fn withSchemaUpdateOptions(schemaUpdateOptions)
fn withSchemaUpdateOptionsMixin(schemaUpdateOptions)
fn withSkipLeadingRows(skipLeadingRows)
fn withSourceFormat(sourceFormat)
fn withSourceUris(sourceUris)
fn withSourceUrisMixin(sourceUris)
fn withTimePartitioning(timePartitioning)
fn withTimePartitioningMixin(timePartitioning)
fn withWriteDisposition(writeDisposition)
obj spec.initProvider.load.destinationEncryptionConfiguration
obj spec.initProvider.load.destinationTable
obj spec.initProvider.load.parquetOptions
obj spec.initProvider.load.timePartitioning
obj spec.initProvider.query
fn withAllowLargeResults(allowLargeResults)
fn withCreateDisposition(createDisposition)
fn withDefaultDataset(defaultDataset)
fn withDefaultDatasetMixin(defaultDataset)
fn withDestinationEncryptionConfiguration(destinationEncryptionConfiguration)
fn withDestinationEncryptionConfigurationMixin(destinationEncryptionConfiguration)
fn withDestinationTable(destinationTable)
fn withDestinationTableMixin(destinationTable)
fn withFlattenResults(flattenResults)
fn withMaximumBillingTier(maximumBillingTier)
fn withMaximumBytesBilled(maximumBytesBilled)
fn withParameterMode(parameterMode)
fn withPriority(priority)
fn withQuery(query)
fn withSchemaUpdateOptions(schemaUpdateOptions)
fn withSchemaUpdateOptionsMixin(schemaUpdateOptions)
fn withScriptOptions(scriptOptions)
fn withScriptOptionsMixin(scriptOptions)
fn withUseLegacySql(useLegacySql)
fn withUseQueryCache(useQueryCache)
fn withUserDefinedFunctionResources(userDefinedFunctionResources)
fn withUserDefinedFunctionResourcesMixin(userDefinedFunctionResources)
fn withWriteDisposition(writeDisposition)
obj spec.initProvider.query.defaultDataset
obj spec.initProvider.query.destinationEncryptionConfiguration
obj spec.initProvider.query.destinationTable
obj spec.initProvider.query.scriptOptions
obj spec.initProvider.query.userDefinedFunctionResources
obj spec.providerConfigRef
obj spec.providerRef
obj spec.publishConnectionDetailsTo
obj spec.writeConnectionSecretToRef
Fields
fn new
new(name)
new returns an instance of Job
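A minimal sketch of constructing a Job with this package; the import path is illustrative and depends on how the library is vendored in your project:

```jsonnet
// Illustrative import; adjust the path to however this library is vendored.
local job = (import 'provider-gcp/main.libsonnet').bigquery.v1beta1.job;

job.new('example-job')
+ job.metadata.withNamespace('default')
```

The with* helpers return object fragments that compose with `+`, which is how the examples below build up a full spec from the result of new().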
obj metadata
"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create."
fn metadata.withAnnotations
withAnnotations(annotations)
"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"
fn metadata.withAnnotationsMixin
withAnnotationsMixin(annotations)
"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations"
Note: This function appends passed data to existing values
fn metadata.withClusterName
withClusterName(clusterName)
"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request."
fn metadata.withCreationTimestamp
withCreationTimestamp(creationTimestamp)
"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."
fn metadata.withDeletionGracePeriodSeconds
withDeletionGracePeriodSeconds(deletionGracePeriodSeconds)
"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only."
fn metadata.withDeletionTimestamp
withDeletionTimestamp(deletionTimestamp)
"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers."
fn metadata.withFinalizers
withFinalizers(finalizers)
"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."
fn metadata.withFinalizersMixin
withFinalizersMixin(finalizers)
"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list."
Note: This function appends passed data to existing values
fn metadata.withGenerateName
withGenerateName(generateName)
"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency"
fn metadata.withGeneration
withGeneration(generation)
"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only."
fn metadata.withLabels
withLabels(labels)
"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"
fn metadata.withLabelsMixin
withLabelsMixin(labels)
"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels"
Note: This function appends passed data to existing values
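To illustrate the difference between the plain setters and their *Mixin variants (which, per the notes above, append rather than replace), a small sketch:

```jsonnet
// Illustrative import path, as in the earlier example.
local job = (import 'provider-gcp/main.libsonnet').bigquery.v1beta1.job;

job.new('example-job')
+ job.metadata.withLabels({ team: 'data' })      // replaces any labels set so far
+ job.metadata.withLabelsMixin({ env: 'prod' })  // merges in, keeping team: 'data'
```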
fn metadata.withName
withName(name)
"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names"
fn metadata.withNamespace
withNamespace(namespace)
"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"
fn metadata.withOwnerReferences
withOwnerReferences(ownerReferences)
"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."
fn metadata.withOwnerReferencesMixin
withOwnerReferencesMixin(ownerReferences)
"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller."
Note: This function appends passed data to existing values
fn metadata.withResourceVersion
withResourceVersion(resourceVersion)
"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency"
fn metadata.withSelfLink
withSelfLink(selfLink)
"SelfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release."
fn metadata.withUid
withUid(uid)
"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"
obj spec
"JobSpec defines the desired state of Job"
fn spec.withDeletionPolicy
withDeletionPolicy(deletionPolicy)
"DeletionPolicy specifies what will happen to the underlying external when this managed resource is deleted - either \"Delete\" or \"Orphan\" the external resource. This field is planned to be deprecated in favor of the ManagementPolicies field in a future release. Currently, both could be set independently and non-default values would be honored if the feature flag is enabled. See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223"
fn spec.withManagementPolicies
withManagementPolicies(managementPolicies)
"THIS IS AN ALPHA FIELD. Do not use it in production. It is not honored unless the relevant Crossplane feature flag is enabled, and may be changed or removed without notice. ManagementPolicies specify the array of actions Crossplane is allowed to take on the managed and external resources. This field is planned to replace the DeletionPolicy field in a future release. Currently, both could be set independently and non-default values would be honored if the feature flag is enabled. If both are custom, the DeletionPolicy field will be ignored. See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md"
fn spec.withManagementPoliciesMixin
withManagementPoliciesMixin(managementPolicies)
"THIS IS AN ALPHA FIELD. Do not use it in production. It is not honored unless the relevant Crossplane feature flag is enabled, and may be changed or removed without notice. ManagementPolicies specify the array of actions Crossplane is allowed to take on the managed and external resources. This field is planned to replace the DeletionPolicy field in a future release. Currently, both could be set independently and non-default values would be honored if the feature flag is enabled. If both are custom, the DeletionPolicy field will be ignored. See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md"
Note: This function appends passed data to existing values
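For instance, to keep the underlying BigQuery job when the Kubernetes object is deleted (values per the docstring above), a sketch:

```jsonnet
// Illustrative import path, as in the earlier examples.
local job = (import 'provider-gcp/main.libsonnet').bigquery.v1beta1.job;

job.new('example-job')
+ job.spec.withDeletionPolicy('Orphan')  // 'Delete' (default) or 'Orphan'
```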
obj spec.forProvider
fn spec.forProvider.withCopy
withCopy(copy)
"Copies a table. Structure is documented below."
fn spec.forProvider.withCopyMixin
withCopyMixin(copy)
"Copies a table. Structure is documented below."
Note: This function appends passed data to existing values
fn spec.forProvider.withExtract
withExtract(extract)
"Configures an extract job. Structure is documented below."
fn spec.forProvider.withExtractMixin
withExtractMixin(extract)
"Configures an extract job. Structure is documented below."
Note: This function appends passed data to existing values
fn spec.forProvider.withJobId
withJobId(jobId)
"The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters."
fn spec.forProvider.withJobTimeoutMs
withJobTimeoutMs(jobTimeoutMs)
"Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job."
fn spec.forProvider.withLabels
withLabels(labels)
"The labels associated with this job. You can use these to organize and group your jobs."
fn spec.forProvider.withLabelsMixin
withLabelsMixin(labels)
"The labels associated with this job. You can use these to organize and group your jobs."
Note: This function appends passed data to existing values
fn spec.forProvider.withLoad
withLoad(load)
"Configures a load job. Structure is documented below."
fn spec.forProvider.withLoadMixin
withLoadMixin(load)
"Configures a load job. Structure is documented below."
Note: This function appends passed data to existing values
fn spec.forProvider.withLocation
withLocation(location)
"The geographic location of the job. The default value is US."
fn spec.forProvider.withProject
withProject(project)
"The ID of the project in which the resource belongs. If it is not provided, the provider project is used."
fn spec.forProvider.withQuery
withQuery(query)
"Configures a query job. Structure is documented below."
fn spec.forProvider.withQueryMixin
withQueryMixin(query)
"Configures a query job. Structure is documented below."
Note: This function appends passed data to existing values
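Putting the top-level forProvider fields together, a sketch of a query job. The SQL, job ID, and location are placeholders; the query block is passed as a raw object, assuming the generated withQuery setter wraps a single block into the list the schema expects (pass `[{ ... }]` explicitly if not):

```jsonnet
// Illustrative import path, as in the earlier examples.
local job = (import 'provider-gcp/main.libsonnet').bigquery.v1beta1.job;

job.new('example-query-job')
+ job.spec.forProvider.withJobId('example_query_job')
+ job.spec.forProvider.withLocation('US')
+ job.spec.forProvider.withJobTimeoutMs('600000')  // 10 minutes
+ job.spec.forProvider.withQuery({
    query: 'SELECT 1;',  // placeholder SQL
    useLegacySql: false,
  })
```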
obj spec.forProvider.copy
"Copies a table. Structure is documented below."
fn spec.forProvider.copy.withCreateDisposition
withCreateDisposition(createDisposition)
"Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER."
fn spec.forProvider.copy.withDestinationEncryptionConfiguration
withDestinationEncryptionConfiguration(destinationEncryptionConfiguration)
"Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below."
fn spec.forProvider.copy.withDestinationEncryptionConfigurationMixin
withDestinationEncryptionConfigurationMixin(destinationEncryptionConfiguration)
"Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below."
Note: This function appends passed data to existing values
fn spec.forProvider.copy.withDestinationTable
withDestinationTable(destinationTable)
"The destination table. Structure is documented below."
fn spec.forProvider.copy.withDestinationTableMixin
withDestinationTableMixin(destinationTable)
"The destination table. Structure is documented below."
Note: This function appends passed data to existing values
fn spec.forProvider.copy.withSourceTables
withSourceTables(sourceTables)
"Source tables to copy. Structure is documented below."
fn spec.forProvider.copy.withSourceTablesMixin
withSourceTablesMixin(sourceTables)
"Source tables to copy. Structure is documented below."
Note: This function appends passed data to existing values
fn spec.forProvider.copy.withWriteDisposition
withWriteDisposition(writeDisposition)
"Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY."
obj spec.forProvider.copy.destinationEncryptionConfiguration
"Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below."
fn spec.forProvider.copy.destinationEncryptionConfiguration.withKmsKeyName
withKmsKeyName(kmsKeyName)
"Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key."
obj spec.forProvider.copy.destinationEncryptionConfiguration.kmsKeyNameRef
"Reference to a CryptoKey in kms to populate kmsKeyName."
fn spec.forProvider.copy.destinationEncryptionConfiguration.kmsKeyNameRef.withName
withName(name)
"Name of the referenced object."
obj spec.forProvider.copy.destinationEncryptionConfiguration.kmsKeyNameRef.policy
"Policies for referencing."
fn spec.forProvider.copy.destinationEncryptionConfiguration.kmsKeyNameRef.policy.withResolution
withResolution(resolution)
"Resolution specifies whether resolution of this reference is required. The default is 'Required', which means the reconcile will fail if the reference cannot be resolved. 'Optional' means this reference will be a no-op if it cannot be resolved."
fn spec.forProvider.copy.destinationEncryptionConfiguration.kmsKeyNameRef.policy.withResolve
withResolve(resolve)
"Resolve specifies when this reference should be resolved. The default is 'IfNotPresent', which will attempt to resolve the reference only when the corresponding field is not present. Use 'Always' to resolve the reference on every reconcile."
obj spec.forProvider.copy.destinationEncryptionConfiguration.kmsKeyNameSelector
"Selector for a CryptoKey in kms to populate kmsKeyName."
fn spec.forProvider.copy.destinationEncryptionConfiguration.kmsKeyNameSelector.withMatchControllerRef
withMatchControllerRef(matchControllerRef)
"MatchControllerRef ensures an object with the same controller reference as the selecting object is selected."
fn spec.forProvider.copy.destinationEncryptionConfiguration.kmsKeyNameSelector.withMatchLabels
withMatchLabels(matchLabels)
"MatchLabels ensures an object with matching labels is selected."
fn spec.forProvider.copy.destinationEncryptionConfiguration.kmsKeyNameSelector.withMatchLabelsMixin
withMatchLabelsMixin(matchLabels)
"MatchLabels ensures an object with matching labels is selected."
Note: This function appends passed data to existing values
obj spec.forProvider.copy.destinationEncryptionConfiguration.kmsKeyNameSelector.policy
"Policies for selection."
fn spec.forProvider.copy.destinationEncryptionConfiguration.kmsKeyNameSelector.policy.withResolution
withResolution(resolution)
"Resolution specifies whether resolution of this reference is required. The default is 'Required', which means the reconcile will fail if the reference cannot be resolved. 'Optional' means this reference will be a no-op if it cannot be resolved."
fn spec.forProvider.copy.destinationEncryptionConfiguration.kmsKeyNameSelector.policy.withResolve
withResolve(resolve)
"Resolve specifies when this reference should be resolved. The default is 'IfNotPresent', which will attempt to resolve the reference only when the corresponding field is not present. Use 'Always' to resolve the reference on every reconcile."
obj spec.forProvider.copy.destinationTable
"The destination table. Structure is documented below."
fn spec.forProvider.copy.destinationTable.withDatasetId
withDatasetId(datasetId)
"The ID of the dataset containing this table."
fn spec.forProvider.copy.destinationTable.withProjectId
withProjectId(projectId)
"The ID of the project containing this table."
fn spec.forProvider.copy.destinationTable.withTableId
withTableId(tableId)
"The table. Can be specified {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not."
obj spec.forProvider.copy.destinationTable.datasetIdRef
"Reference to a Dataset in bigquery to populate datasetId."
fn spec.forProvider.copy.destinationTable.datasetIdRef.withName
withName(name)
"Name of the referenced object."
obj spec.forProvider.copy.destinationTable.datasetIdRef.policy
"Policies for referencing."
fn spec.forProvider.copy.destinationTable.datasetIdRef.policy.withResolution
withResolution(resolution)
"Resolution specifies whether resolution of this reference is required. The default is 'Required', which means the reconcile will fail if the reference cannot be resolved. 'Optional' means this reference will be a no-op if it cannot be resolved."
fn spec.forProvider.copy.destinationTable.datasetIdRef.policy.withResolve
withResolve(resolve)
"Resolve specifies when this reference should be resolved. The default is 'IfNotPresent', which will attempt to resolve the reference only when the corresponding field is not present. Use 'Always' to resolve the reference on every reconcile."
obj spec.forProvider.copy.destinationTable.datasetIdSelector
"Selector for a Dataset in bigquery to populate datasetId."
fn spec.forProvider.copy.destinationTable.datasetIdSelector.withMatchControllerRef
withMatchControllerRef(matchControllerRef)
"MatchControllerRef ensures an object with the same controller reference as the selecting object is selected."
fn spec.forProvider.copy.destinationTable.datasetIdSelector.withMatchLabels
withMatchLabels(matchLabels)
"MatchLabels ensures an object with matching labels is selected."
fn spec.forProvider.copy.destinationTable.datasetIdSelector.withMatchLabelsMixin
withMatchLabelsMixin(matchLabels)
"MatchLabels ensures an object with matching labels is selected."
Note: This function appends passed data to existing values
obj spec.forProvider.copy.destinationTable.datasetIdSelector.policy
"Policies for selection."
fn spec.forProvider.copy.destinationTable.datasetIdSelector.policy.withResolution
withResolution(resolution)
"Resolution specifies whether resolution of this reference is required. The default is 'Required', which means the reconcile will fail if the reference cannot be resolved. 'Optional' means this reference will be a no-op if it cannot be resolved."
fn spec.forProvider.copy.destinationTable.datasetIdSelector.policy.withResolve
withResolve(resolve)
"Resolve specifies when this reference should be resolved. The default is 'IfNotPresent', which will attempt to resolve the reference only when the corresponding field is not present. Use 'Always' to resolve the reference on every reconcile."
obj spec.forProvider.copy.destinationTable.tableIdRef
"Reference to a Table in bigquery to populate tableId."
fn spec.forProvider.copy.destinationTable.tableIdRef.withName
withName(name)
"Name of the referenced object."
obj spec.forProvider.copy.destinationTable.tableIdRef.policy
"Policies for referencing."
fn spec.forProvider.copy.destinationTable.tableIdRef.policy.withResolution
withResolution(resolution)
"Resolution specifies whether resolution of this reference is required. The default is 'Required', which means the reconcile will fail if the reference cannot be resolved. 'Optional' means this reference will be a no-op if it cannot be resolved."
fn spec.forProvider.copy.destinationTable.tableIdRef.policy.withResolve
withResolve(resolve)
"Resolve specifies when this reference should be resolved. The default is 'IfNotPresent', which will attempt to resolve the reference only when the corresponding field is not present. Use 'Always' to resolve the reference on every reconcile."
obj spec.forProvider.copy.destinationTable.tableIdSelector
"Selector for a Table in bigquery to populate tableId."
fn spec.forProvider.copy.destinationTable.tableIdSelector.withMatchControllerRef
withMatchControllerRef(matchControllerRef)
"MatchControllerRef ensures an object with the same controller reference as the selecting object is selected."
fn spec.forProvider.copy.destinationTable.tableIdSelector.withMatchLabels
withMatchLabels(matchLabels)
"MatchLabels ensures an object with matching labels is selected."
fn spec.forProvider.copy.destinationTable.tableIdSelector.withMatchLabelsMixin
withMatchLabelsMixin(matchLabels)
"MatchLabels ensures an object with matching labels is selected."
Note: This function appends passed data to existing values
obj spec.forProvider.copy.destinationTable.tableIdSelector.policy
"Policies for selection."
fn spec.forProvider.copy.destinationTable.tableIdSelector.policy.withResolution
withResolution(resolution)
"Resolution specifies whether resolution of this reference is required. The default is 'Required', which means the reconcile will fail if the reference cannot be resolved. 'Optional' means this reference will be a no-op if it cannot be resolved."
fn spec.forProvider.copy.destinationTable.tableIdSelector.policy.withResolve
withResolve(resolve)
"Resolve specifies when this reference should be resolved. The default is 'IfNotPresent', which will attempt to resolve the reference only when the corresponding field is not present. Use 'Always' to resolve the reference on every reconcile."
obj spec.forProvider.copy.sourceTables
"Source tables to copy. Structure is documented below."
fn spec.forProvider.copy.sourceTables.withDatasetId
withDatasetId(datasetId)
"The ID of the dataset containing this table."
fn spec.forProvider.copy.sourceTables.withProjectId
withProjectId(projectId)
"The ID of the project containing this table."
fn spec.forProvider.copy.sourceTables.withTableId
withTableId(tableId)
"The table. Can be specified {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not."
obj spec.forProvider.extract
"Configures an extract job. Structure is documented below."
fn spec.forProvider.extract.withCompression
withCompression(compression)
"The compression type to use for exported files. Possible values include GZIP, DEFLATE, SNAPPY, and NONE. The default value is NONE. DEFLATE and SNAPPY are only supported for Avro."
fn spec.forProvider.extract.withDestinationFormat
withDestinationFormat(destinationFormat)
"The exported file format. Possible values include CSV, NEWLINE_DELIMITED_JSON and AVRO for tables and SAVED_MODEL for models. The default value for tables is CSV. Tables with nested or repeated fields cannot be exported as CSV. The default value for models is SAVED_MODEL."
fn spec.forProvider.extract.withDestinationUris
withDestinationUris(destinationUris)
"A list of fully-qualified Google Cloud Storage URIs where the extracted table should be written."
fn spec.forProvider.extract.withDestinationUrisMixin
withDestinationUrisMixin(destinationUris)
"A list of fully-qualified Google Cloud Storage URIs where the extracted table should be written."
Note: This function appends passed data to existing values
fn spec.forProvider.extract.withFieldDelimiter
withFieldDelimiter(fieldDelimiter)
"When extracting data in CSV format, this defines the delimiter to use between fields in the exported data. Default is ','"
fn spec.forProvider.extract.withPrintHeader
withPrintHeader(printHeader)
"Whether to print out a header row in the results. Default is true."
fn spec.forProvider.extract.withSourceModel
withSourceModel(sourceModel)
"A reference to the model being exported. Structure is documented below."
fn spec.forProvider.extract.withSourceModelMixin
withSourceModelMixin(sourceModel)
"A reference to the model being exported. Structure is documented below."
Note: This function appends passed data to existing values
fn spec.forProvider.extract.withSourceTable
withSourceTable(sourceTable)
"A reference to the table being exported. Structure is documented below."
fn spec.forProvider.extract.withSourceTableMixin
withSourceTableMixin(sourceTable)
"A reference to the table being exported. Structure is documented below."
Note: This function appends passed data to existing values
fn spec.forProvider.extract.withUseAvroLogicalTypes
withUseAvroLogicalTypes(useAvroLogicalTypes)
"Whether to use logical types when extracting to AVRO format."
obj spec.forProvider.extract.sourceModel
"A reference to the model being exported. Structure is documented below."
fn spec.forProvider.extract.sourceModel.withDatasetId
withDatasetId(datasetId)
"The ID of the dataset containing this table."
fn spec.forProvider.extract.sourceModel.withModelId
withModelId(modelId)
"The ID of the model."
fn spec.forProvider.extract.sourceModel.withProjectId
withProjectId(projectId)
"The ID of the project containing this table."
obj spec.forProvider.extract.sourceTable
"A reference to the table being exported. Structure is documented below."
fn spec.forProvider.extract.sourceTable.withDatasetId
withDatasetId(datasetId)
"The ID of the dataset containing this table."
fn spec.forProvider.extract.sourceTable.withProjectId
withProjectId(projectId)
"The ID of the project containing this table."
fn spec.forProvider.extract.sourceTable.withTableId
withTableId(tableId)
"The table. Can be specified {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not."
obj spec.forProvider.extract.sourceTable.datasetIdRef
"Reference to a Dataset in bigquery to populate datasetId."
fn spec.forProvider.extract.sourceTable.datasetIdRef.withName
withName(name)
"Name of the referenced object."
obj spec.forProvider.extract.sourceTable.datasetIdRef.policy
"Policies for referencing."
fn spec.forProvider.extract.sourceTable.datasetIdRef.policy.withResolution
withResolution(resolution)
"Resolution specifies whether resolution of this reference is required. The default is 'Required', which means the reconcile will fail if the reference cannot be resolved. 'Optional' means this reference will be a no-op if it cannot be resolved."
fn spec.forProvider.extract.sourceTable.datasetIdRef.policy.withResolve
withResolve(resolve)
"Resolve specifies when this reference should be resolved. The default is 'IfNotPresent', which will attempt to resolve the reference only when the corresponding field is not present. Use 'Always' to resolve the reference on every reconcile."
obj spec.forProvider.extract.sourceTable.datasetIdSelector
"Selector for a Dataset in bigquery to populate datasetId."
fn spec.forProvider.extract.sourceTable.datasetIdSelector.withMatchControllerRef
withMatchControllerRef(matchControllerRef)
"MatchControllerRef ensures an object with the same controller reference as the selecting object is selected."
fn spec.forProvider.extract.sourceTable.datasetIdSelector.withMatchLabels
withMatchLabels(matchLabels)
"MatchLabels ensures an object with matching labels is selected."
fn spec.forProvider.extract.sourceTable.datasetIdSelector.withMatchLabelsMixin
withMatchLabelsMixin(matchLabels)
"MatchLabels ensures an object with matching labels is selected."
Note: This function appends passed data to existing values
obj spec.forProvider.extract.sourceTable.datasetIdSelector.policy
"Policies for selection."
fn spec.forProvider.extract.sourceTable.datasetIdSelector.policy.withResolution
withResolution(resolution)
"Resolution specifies whether resolution of this reference is required. The default is 'Required', which means the reconcile will fail if the reference cannot be resolved. 'Optional' means this reference will be a no-op if it cannot be resolved."
fn spec.forProvider.extract.sourceTable.datasetIdSelector.policy.withResolve
withResolve(resolve)
"Resolve specifies when this reference should be resolved. The default is 'IfNotPresent', which will attempt to resolve the reference only when the corresponding field is not present. Use 'Always' to resolve the reference on every reconcile."
obj spec.forProvider.extract.sourceTable.tableIdRef
"Reference to a Table in bigquery to populate tableId."
fn spec.forProvider.extract.sourceTable.tableIdRef.withName
withName(name)
"Name of the referenced object."
obj spec.forProvider.extract.sourceTable.tableIdRef.policy
"Policies for referencing."
fn spec.forProvider.extract.sourceTable.tableIdRef.policy.withResolution
withResolution(resolution)
"Resolution specifies whether resolution of this reference is required. The default is 'Required', which means the reconcile will fail if the reference cannot be resolved. 'Optional' means this reference will be a no-op if it cannot be resolved."
fn spec.forProvider.extract.sourceTable.tableIdRef.policy.withResolve
withResolve(resolve)
"Resolve specifies when this reference should be resolved. The default is 'IfNotPresent', which will attempt to resolve the reference only when the corresponding field is not present. Use 'Always' to resolve the reference on every reconcile."
obj spec.forProvider.extract.sourceTable.tableIdSelector
"Selector for a Table in bigquery to populate tableId."
fn spec.forProvider.extract.sourceTable.tableIdSelector.withMatchControllerRef
withMatchControllerRef(matchControllerRef)
"MatchControllerRef ensures an object with the same controller reference as the selecting object is selected."
fn spec.forProvider.extract.sourceTable.tableIdSelector.withMatchLabels
withMatchLabels(matchLabels)
"MatchLabels ensures an object with matching labels is selected."
fn spec.forProvider.extract.sourceTable.tableIdSelector.withMatchLabelsMixin
withMatchLabelsMixin(matchLabels)
"MatchLabels ensures an object with matching labels is selected."
Note: This function appends passed data to existing values
obj spec.forProvider.extract.sourceTable.tableIdSelector.policy
"Policies for selection."
fn spec.forProvider.extract.sourceTable.tableIdSelector.policy.withResolution
withResolution(resolution)
"Resolution specifies whether resolution of this reference is required. The default is 'Required', which means the reconcile will fail if the reference cannot be resolved. 'Optional' means this reference will be a no-op if it cannot be resolved."
fn spec.forProvider.extract.sourceTable.tableIdSelector.policy.withResolve
withResolve(resolve)
"Resolve specifies when this reference should be resolved. The default is 'IfNotPresent', which will attempt to resolve the reference only when the corresponding field is not present. Use 'Always' to resolve the reference on every reconcile."
obj spec.forProvider.load
"Configures a load job. Structure is documented below."
fn spec.forProvider.load.withAllowJaggedRows
withAllowJaggedRows(allowJaggedRows)
"Accept rows that are missing trailing optional columns. The missing values are treated as nulls. If false, records with missing trailing columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. Only applicable to CSV, ignored for other formats."
fn spec.forProvider.load.withAllowQuotedNewlines
withAllowQuotedNewlines(allowQuotedNewlines)
"Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false."
fn spec.forProvider.load.withAutodetect
withAutodetect(autodetect)
"Indicates if we should automatically infer the options and schema for CSV and JSON sources."
fn spec.forProvider.load.withCreateDisposition
withCreateDisposition(createDisposition)
"Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER."
fn spec.forProvider.load.withDestinationEncryptionConfiguration
withDestinationEncryptionConfiguration(destinationEncryptionConfiguration)
"Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below."
fn spec.forProvider.load.withDestinationEncryptionConfigurationMixin
withDestinationEncryptionConfigurationMixin(destinationEncryptionConfiguration)
"Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below."
Note: This function appends passed data to existing values
fn spec.forProvider.load.withDestinationTable
withDestinationTable(destinationTable)
"The destination table to load the data into. Structure is documented below."
fn spec.forProvider.load.withDestinationTableMixin
withDestinationTableMixin(destinationTable)
"The destination table to load the data into. Structure is documented below."
Note: This function appends passed data to existing values
fn spec.forProvider.load.withEncoding
withEncoding(encoding)
"The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties."
fn spec.forProvider.load.withFieldDelimiter
withFieldDelimiter(fieldDelimiter)
"The separator for fields in a CSV file. The separator can be any ISO-8859-1 single-byte character. To use a character in the range 128-255, you must encode the character as UTF8. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence \"\t\" to specify a tab separator. The default value is a comma (',')."
fn spec.forProvider.load.withIgnoreUnknownValues
withIgnoreUnknownValues(ignoreUnknownValues)
"Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don't match any column names"
fn spec.forProvider.load.withJsonExtension
withJsonExtension(jsonExtension)
"If sourceFormat is set to newline-delimited JSON, indicates whether it should be processed as a JSON variant such as GeoJSON. For a sourceFormat other than JSON, omit this field. If the sourceFormat is newline-delimited JSON: - for newline-delimited GeoJSON: set to GEOJSON."
fn spec.forProvider.load.withMaxBadRecords
withMaxBadRecords(maxBadRecords)
"The maximum number of bad records that BigQuery can ignore when running the job. If the number of bad records exceeds this value, an invalid error is returned in the job result. The default value is 0, which requires that all records are valid."
fn spec.forProvider.load.withNullMarker
withNullMarker(nullMarker)
"Specifies a string that represents a null value in a CSV file. For example, if you specify \"\N\", BigQuery interprets \"\N\" as a null value when loading a CSV file. The default value is the empty string. If you set this property to a custom value, BigQuery throws an error if an empty string is present for all data types except for STRING and BYTE. For STRING and BYTE columns, BigQuery interprets the empty string as an empty value."
fn spec.forProvider.load.withParquetOptions
withParquetOptions(parquetOptions)
"Parquet Options for load and make external tables. Structure is documented below."
fn spec.forProvider.load.withParquetOptionsMixin
withParquetOptionsMixin(parquetOptions)
"Parquet Options for load and make external tables. Structure is documented below."
Note: This function appends passed data to existing values
fn spec.forProvider.load.withProjectionFields
withProjectionFields(projectionFields)
"If sourceFormat is set to \"DATASTORE_BACKUP\", indicates which entity properties to load into BigQuery from a Cloud Datastore backup. Property names are case sensitive and must be top-level properties. If no properties are specified, BigQuery loads all properties. If any named property isn't found in the Cloud Datastore backup, an invalid error is returned in the job result."
fn spec.forProvider.load.withProjectionFieldsMixin
withProjectionFieldsMixin(projectionFields)
"If sourceFormat is set to \"DATASTORE_BACKUP\", indicates which entity properties to load into BigQuery from a Cloud Datastore backup. Property names are case sensitive and must be top-level properties. If no properties are specified, BigQuery loads all properties. If any named property isn't found in the Cloud Datastore backup, an invalid error is returned in the job result."
Note: This function appends passed data to existing values
fn spec.forProvider.load.withQuote
withQuote(quote)
"The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. The default value is a double-quote ('\"'). If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true."
fn spec.forProvider.load.withSchemaUpdateOptions
withSchemaUpdateOptions(schemaUpdateOptions)
"Allows the schema of the destination table to be updated as a side effect of the load job if a schema is autodetected or supplied in the job configuration. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable."
fn spec.forProvider.load.withSchemaUpdateOptionsMixin
withSchemaUpdateOptionsMixin(schemaUpdateOptions)
"Allows the schema of the destination table to be updated as a side effect of the load job if a schema is autodetected or supplied in the job configuration. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable."
Note: This function appends passed data to existing values
fn spec.forProvider.load.withSkipLeadingRows
withSkipLeadingRows(skipLeadingRows)
"The number of rows at the top of a CSV file that BigQuery will skip when loading the data. The default value is 0. This property is useful if you have header rows in the file that should be skipped. When autodetect is on, the behavior is the following: skipLeadingRows unspecified - Autodetect tries to detect headers in the first row. If they are not detected, the row is read as data. Otherwise data is read starting from the second row. skipLeadingRows is 0 - Instructs autodetect that there are no headers and data should be read starting from the first row. skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect headers in row N. If headers are not detected, row N is just skipped. Otherwise row N is used to extract column names for the detected schema."
fn spec.forProvider.load.withSourceFormat
withSourceFormat(sourceFormat)
"The format of the data files. For CSV files, specify \"CSV\". For datastore backups, specify \"DATASTORE_BACKUP\". For newline-delimited JSON, specify \"NEWLINE_DELIMITED_JSON\". For Avro, specify \"AVRO\". For parquet, specify \"PARQUET\". For orc, specify \"ORC\". [Beta] For Bigtable, specify \"BIGTABLE\". The default value is CSV."
fn spec.forProvider.load.withSourceUris
withSourceUris(sourceUris)
"The fully-qualified URIs that point to your data in Google Cloud. For Google Cloud Storage URIs: Each URI can contain one '' wildcard character and it must come after the 'bucket' name. Size limits related to load jobs apply to external data sources. For Google Cloud Bigtable URIs: Exactly one URI can be specified and it has be a fully specified and valid HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore backups: Exactly one URI can be specified. Also, the '' wildcard character is not allowed."
fn spec.forProvider.load.withSourceUrisMixin
withSourceUrisMixin(sourceUris)
"The fully-qualified URIs that point to your data in Google Cloud. For Google Cloud Storage URIs: Each URI can contain one '' wildcard character and it must come after the 'bucket' name. Size limits related to load jobs apply to external data sources. For Google Cloud Bigtable URIs: Exactly one URI can be specified and it has be a fully specified and valid HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore backups: Exactly one URI can be specified. Also, the '' wildcard character is not allowed."
Note: This function appends passed data to existing values
fn spec.forProvider.load.withTimePartitioning
withTimePartitioning(timePartitioning)
"Time-based partitioning specification for the destination table. Structure is documented below."
fn spec.forProvider.load.withTimePartitioningMixin
withTimePartitioningMixin(timePartitioning)
"Time-based partitioning specification for the destination table. Structure is documented below."
Note: This function appends passed data to existing values
fn spec.forProvider.load.withWriteDisposition
withWriteDisposition(writeDisposition)
"Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY."
obj spec.forProvider.load.destinationEncryptionConfiguration
"Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below."
fn spec.forProvider.load.destinationEncryptionConfiguration.withKmsKeyName
withKmsKeyName(kmsKeyName)
"Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key."
obj spec.forProvider.load.destinationTable
"The destination table to load the data into. Structure is documented below."
fn spec.forProvider.load.destinationTable.withDatasetId
withDatasetId(datasetId)
"The ID of the dataset containing this table."
fn spec.forProvider.load.destinationTable.withProjectId
withProjectId(projectId)
"The ID of the project containing this table."
fn spec.forProvider.load.destinationTable.withTableId
withTableId(tableId)
"The table. Can be specified {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not."
obj spec.forProvider.load.destinationTable.datasetIdRef
"Reference to a Dataset in bigquery to populate datasetId."
fn spec.forProvider.load.destinationTable.datasetIdRef.withName
withName(name)
"Name of the referenced object."
obj spec.forProvider.load.destinationTable.datasetIdRef.policy
"Policies for referencing."
fn spec.forProvider.load.destinationTable.datasetIdRef.policy.withResolution
withResolution(resolution)
"Resolution specifies whether resolution of this reference is required. The default is 'Required', which means the reconcile will fail if the reference cannot be resolved. 'Optional' means this reference will be a no-op if it cannot be resolved."
fn spec.forProvider.load.destinationTable.datasetIdRef.policy.withResolve
withResolve(resolve)
"Resolve specifies when this reference should be resolved. The default is 'IfNotPresent', which will attempt to resolve the reference only when the corresponding field is not present. Use 'Always' to resolve the reference on every reconcile."
obj spec.forProvider.load.destinationTable.datasetIdSelector
"Selector for a Dataset in bigquery to populate datasetId."
fn spec.forProvider.load.destinationTable.datasetIdSelector.withMatchControllerRef
withMatchControllerRef(matchControllerRef)
"MatchControllerRef ensures an object with the same controller reference as the selecting object is selected."
fn spec.forProvider.load.destinationTable.datasetIdSelector.withMatchLabels
withMatchLabels(matchLabels)
"MatchLabels ensures an object with matching labels is selected."
fn spec.forProvider.load.destinationTable.datasetIdSelector.withMatchLabelsMixin
withMatchLabelsMixin(matchLabels)
"MatchLabels ensures an object with matching labels is selected."
Note: This function appends passed data to existing values
obj spec.forProvider.load.destinationTable.datasetIdSelector.policy
"Policies for selection."
fn spec.forProvider.load.destinationTable.datasetIdSelector.policy.withResolution
withResolution(resolution)
"Resolution specifies whether resolution of this reference is required. The default is 'Required', which means the reconcile will fail if the reference cannot be resolved. 'Optional' means this reference will be a no-op if it cannot be resolved."
fn spec.forProvider.load.destinationTable.datasetIdSelector.policy.withResolve
withResolve(resolve)
"Resolve specifies when this reference should be resolved. The default is 'IfNotPresent', which will attempt to resolve the reference only when the corresponding field is not present. Use 'Always' to resolve the reference on every reconcile."
obj spec.forProvider.load.destinationTable.tableIdRef
"Reference to a Table in bigquery to populate tableId."
fn spec.forProvider.load.destinationTable.tableIdRef.withName
withName(name)
"Name of the referenced object."
obj spec.forProvider.load.destinationTable.tableIdRef.policy
"Policies for referencing."
fn spec.forProvider.load.destinationTable.tableIdRef.policy.withResolution
withResolution(resolution)
"Resolution specifies whether resolution of this reference is required. The default is 'Required', which means the reconcile will fail if the reference cannot be resolved. 'Optional' means this reference will be a no-op if it cannot be resolved."
fn spec.forProvider.load.destinationTable.tableIdRef.policy.withResolve
withResolve(resolve)
"Resolve specifies when this reference should be resolved. The default is 'IfNotPresent', which will attempt to resolve the reference only when the corresponding field is not present. Use 'Always' to resolve the reference on every reconcile."
obj spec.forProvider.load.destinationTable.tableIdSelector
"Selector for a Table in bigquery to populate tableId."
fn spec.forProvider.load.destinationTable.tableIdSelector.withMatchControllerRef
withMatchControllerRef(matchControllerRef)
"MatchControllerRef ensures an object with the same controller reference as the selecting object is selected."
fn spec.forProvider.load.destinationTable.tableIdSelector.withMatchLabels
withMatchLabels(matchLabels)
"MatchLabels ensures an object with matching labels is selected."
fn spec.forProvider.load.destinationTable.tableIdSelector.withMatchLabelsMixin
withMatchLabelsMixin(matchLabels)
"MatchLabels ensures an object with matching labels is selected."
Note: This function appends passed data to existing values
obj spec.forProvider.load.destinationTable.tableIdSelector.policy
"Policies for selection."
fn spec.forProvider.load.destinationTable.tableIdSelector.policy.withResolution
withResolution(resolution)
"Resolution specifies whether resolution of this reference is required. The default is 'Required', which means the reconcile will fail if the reference cannot be resolved. 'Optional' means this reference will be a no-op if it cannot be resolved."
fn spec.forProvider.load.destinationTable.tableIdSelector.policy.withResolve
withResolve(resolve)
"Resolve specifies when this reference should be resolved. The default is 'IfNotPresent', which will attempt to resolve the reference only when the corresponding field is not present. Use 'Always' to resolve the reference on every reconcile."
obj spec.forProvider.load.parquetOptions
"Parquet Options for load and make external tables. Structure is documented below."
fn spec.forProvider.load.parquetOptions.withEnableListInference
withEnableListInference(enableListInference)
"If sourceFormat is set to PARQUET, indicates whether to use schema inference specifically for Parquet LIST logical type."
fn spec.forProvider.load.parquetOptions.withEnumAsString
withEnumAsString(enumAsString)
"If sourceFormat is set to PARQUET, indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default."
obj spec.forProvider.load.timePartitioning
"Time-based partitioning specification for the destination table. Structure is documented below."
fn spec.forProvider.load.timePartitioning.withExpirationMs
withExpirationMs(expirationMs)
"Number of milliseconds for which to keep the storage for a partition. A wrapper is used here because 0 is an invalid value."
fn spec.forProvider.load.timePartitioning.withField
withField(field)
"If not set, the table is partitioned by pseudo column '_PARTITIONTIME'; if set, the table is partitioned by this field. The field must be a top-level TIMESTAMP or DATE field. Its mode must be NULLABLE or REQUIRED. A wrapper is used here because an empty string is an invalid value."
fn spec.forProvider.load.timePartitioning.withType
withType(type)
"The only type supported is DAY, which will generate one partition per day. Providing an empty string used to cause an error, but in OnePlatform the field will be treated as unset."
obj spec.forProvider.query
"Configures a query job. Structure is documented below."
fn spec.forProvider.query.withAllowLargeResults
withAllowLargeResults(allowLargeResults)
"If true and query uses legacy SQL dialect, allows the query to produce arbitrarily large result tables at a slight cost in performance. Requires destinationTable to be set. For standard SQL queries, this flag is ignored and large results are always allowed. However, you must still set destinationTable when result size exceeds the allowed maximum response size."
fn spec.forProvider.query.withCreateDisposition
withCreateDisposition(createDisposition)
"Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER."
fn spec.forProvider.query.withDefaultDataset
withDefaultDataset(defaultDataset)
"Specifies the default dataset to use for unqualified table names in the query. Note that this does not alter behavior of unqualified dataset names. Structure is documented below."
fn spec.forProvider.query.withDefaultDatasetMixin
withDefaultDatasetMixin(defaultDataset)
"Specifies the default dataset to use for unqualified table names in the query. Note that this does not alter behavior of unqualified dataset names. Structure is documented below."
Note: This function appends passed data to existing values
fn spec.forProvider.query.withDestinationEncryptionConfiguration
withDestinationEncryptionConfiguration(destinationEncryptionConfiguration)
"Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below."
fn spec.forProvider.query.withDestinationEncryptionConfigurationMixin
withDestinationEncryptionConfigurationMixin(destinationEncryptionConfiguration)
"Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below."
Note: This function appends passed data to existing values
fn spec.forProvider.query.withDestinationTable
withDestinationTable(destinationTable)
"Describes the table where the query results should be stored. This property must be set for large results that exceed the maximum response size. For queries that produce anonymous (cached) results, this field will be populated by BigQuery. Structure is documented below."
fn spec.forProvider.query.withDestinationTableMixin
withDestinationTableMixin(destinationTable)
"Describes the table where the query results should be stored. This property must be set for large results that exceed the maximum response size. For queries that produce anonymous (cached) results, this field will be populated by BigQuery. Structure is documented below."
Note: This function appends passed data to existing values
fn spec.forProvider.query.withFlattenResults
withFlattenResults(flattenResults)
"If true and query uses legacy SQL dialect, flattens all nested and repeated fields in the query results. allowLargeResults must be true if this is set to false. For standard SQL queries, this flag is ignored and results are never flattened."
fn spec.forProvider.query.withMaximumBillingTier
withMaximumBillingTier(maximumBillingTier)
"Limits the billing tier for this job. Queries that have resource usage beyond this tier will fail (without incurring a charge). If unspecified, this will be set to your project default."
fn spec.forProvider.query.withMaximumBytesBilled
withMaximumBytesBilled(maximumBytesBilled)
"Limits the bytes billed for this job. Queries that will have bytes billed beyond this limit will fail (without incurring a charge). If unspecified, this will be set to your project default."
fn spec.forProvider.query.withParameterMode
withParameterMode(parameterMode)
"Standard SQL only. Set to POSITIONAL to use positional (?) query parameters or to NAMED to use named (@myparam) query parameters in this query."
fn spec.forProvider.query.withPriority
withPriority(priority)
"Specifies a priority for the query. Default value is INTERACTIVE. Possible values are: INTERACTIVE, BATCH."
fn spec.forProvider.query.withQuery
withQuery(query)
"SQL query text to execute. The useLegacySql field can be used to indicate whether the query uses legacy SQL or standard SQL. NOTE: queries containing DML language (DELETE, UPDATE, MERGE, INSERT) must specify create_disposition = \"\" and write_disposition = \"\"."
fn spec.forProvider.query.withSchemaUpdateOptions
withSchemaUpdateOptions(schemaUpdateOptions)
"Allows the schema of the destination table to be updated as a side effect of the query job. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable."
fn spec.forProvider.query.withSchemaUpdateOptionsMixin
withSchemaUpdateOptionsMixin(schemaUpdateOptions)
"Allows the schema of the destination table to be updated as a side effect of the query job. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable."
Note: This function appends passed data to existing values
fn spec.forProvider.query.withScriptOptions
withScriptOptions(scriptOptions)
"Options controlling the execution of scripts. Structure is documented below."
fn spec.forProvider.query.withScriptOptionsMixin
withScriptOptionsMixin(scriptOptions)
"Options controlling the execution of scripts. Structure is documented below."
Note: This function appends passed data to existing values
fn spec.forProvider.query.withUseLegacySql
withUseLegacySql(useLegacySql)
"Specifies whether to use BigQuery's legacy SQL dialect for this query. The default value is true. If set to false, the query will use BigQuery's standard SQL."
fn spec.forProvider.query.withUseQueryCache
withUseQueryCache(useQueryCache)
"Whether to look for the result in the query cache. The query cache is a best-effort cache that will be flushed whenever tables in the query are modified. Moreover, the query cache is only available when a query does not have a destination table specified. The default value is true."
fn spec.forProvider.query.withUserDefinedFunctionResources
withUserDefinedFunctionResources(userDefinedFunctionResources)
"Describes user-defined function resources used in the query. Structure is documented below."
fn spec.forProvider.query.withUserDefinedFunctionResourcesMixin
withUserDefinedFunctionResourcesMixin(userDefinedFunctionResources)
"Describes user-defined function resources used in the query. Structure is documented below."
Note: This function appends passed data to existing values
fn spec.forProvider.query.withWriteDisposition
withWriteDisposition(writeDisposition)
"Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY."
obj spec.forProvider.query.defaultDataset
"Specifies the default dataset to use for unqualified table names in the query. Note that this does not alter behavior of unqualified dataset names. Structure is documented below."
fn spec.forProvider.query.defaultDataset.withDatasetId
withDatasetId(datasetId)
"The ID of the dataset containing this table."
fn spec.forProvider.query.defaultDataset.withProjectId
withProjectId(projectId)
"The ID of the project containing this table."
obj spec.forProvider.query.defaultDataset.datasetIdRef
"Reference to a Dataset in bigquery to populate datasetId."
fn spec.forProvider.query.defaultDataset.datasetIdRef.withName
withName(name)
"Name of the referenced object."
obj spec.forProvider.query.defaultDataset.datasetIdRef.policy
"Policies for referencing."
fn spec.forProvider.query.defaultDataset.datasetIdRef.policy.withResolution
withResolution(resolution)
"Resolution specifies whether resolution of this reference is required. The default is 'Required', which means the reconcile will fail if the reference cannot be resolved. 'Optional' means this reference will be a no-op if it cannot be resolved."
fn spec.forProvider.query.defaultDataset.datasetIdRef.policy.withResolve
withResolve(resolve)
"Resolve specifies when this reference should be resolved. The default is 'IfNotPresent', which will attempt to resolve the reference only when the corresponding field is not present. Use 'Always' to resolve the reference on every reconcile."
obj spec.forProvider.query.defaultDataset.datasetIdSelector
"Selector for a Dataset in bigquery to populate datasetId."
fn spec.forProvider.query.defaultDataset.datasetIdSelector.withMatchControllerRef
withMatchControllerRef(matchControllerRef)
"MatchControllerRef ensures an object with the same controller reference as the selecting object is selected."
fn spec.forProvider.query.defaultDataset.datasetIdSelector.withMatchLabels
withMatchLabels(matchLabels)
"MatchLabels ensures an object with matching labels is selected."
fn spec.forProvider.query.defaultDataset.datasetIdSelector.withMatchLabelsMixin
withMatchLabelsMixin(matchLabels)
"MatchLabels ensures an object with matching labels is selected."
Note: This function appends passed data to existing values
obj spec.forProvider.query.defaultDataset.datasetIdSelector.policy
"Policies for selection."
fn spec.forProvider.query.defaultDataset.datasetIdSelector.policy.withResolution
withResolution(resolution)
"Resolution specifies whether resolution of this reference is required. The default is 'Required', which means the reconcile will fail if the reference cannot be resolved. 'Optional' means this reference will be a no-op if it cannot be resolved."
fn spec.forProvider.query.defaultDataset.datasetIdSelector.policy.withResolve
withResolve(resolve)
"Resolve specifies when this reference should be resolved. The default is 'IfNotPresent', which will attempt to resolve the reference only when the corresponding field is not present. Use 'Always' to resolve the reference on every reconcile."
obj spec.forProvider.query.destinationEncryptionConfiguration
"Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below."
fn spec.forProvider.query.destinationEncryptionConfiguration.withKmsKeyName
withKmsKeyName(kmsKeyName)
"Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key."
obj spec.forProvider.query.destinationTable
"Describes the table where the query results should be stored. This property must be set for large results that exceed the maximum response size. For queries that produce anonymous (cached) results, this field will be populated by BigQuery. Structure is documented below."
fn spec.forProvider.query.destinationTable.withDatasetId
withDatasetId(datasetId)
"The ID of the dataset containing this table."
fn spec.forProvider.query.destinationTable.withProjectId
withProjectId(projectId)
"The ID of the project containing this table."
fn spec.forProvider.query.destinationTable.withTableId
withTableId(tableId)
"The table. Can be specified {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not."
obj spec.forProvider.query.destinationTable.datasetIdRef
"Reference to a Dataset in bigquery to populate datasetId."
fn spec.forProvider.query.destinationTable.datasetIdRef.withName
withName(name)
"Name of the referenced object."
obj spec.forProvider.query.destinationTable.datasetIdRef.policy
"Policies for referencing."
fn spec.forProvider.query.destinationTable.datasetIdRef.policy.withResolution
withResolution(resolution)
"Resolution specifies whether resolution of this reference is required. The default is 'Required', which means the reconcile will fail if the reference cannot be resolved. 'Optional' means this reference will be a no-op if it cannot be resolved."
fn spec.forProvider.query.destinationTable.datasetIdRef.policy.withResolve
withResolve(resolve)
"Resolve specifies when this reference should be resolved. The default is 'IfNotPresent', which will attempt to resolve the reference only when the corresponding field is not present. Use 'Always' to resolve the reference on every reconcile."
obj spec.forProvider.query.destinationTable.datasetIdSelector
"Selector for a Dataset in bigquery to populate datasetId."
fn spec.forProvider.query.destinationTable.datasetIdSelector.withMatchControllerRef
withMatchControllerRef(matchControllerRef)
"MatchControllerRef ensures an object with the same controller reference as the selecting object is selected."
fn spec.forProvider.query.destinationTable.datasetIdSelector.withMatchLabels
withMatchLabels(matchLabels)
"MatchLabels ensures an object with matching labels is selected."
fn spec.forProvider.query.destinationTable.datasetIdSelector.withMatchLabelsMixin
withMatchLabelsMixin(matchLabels)
"MatchLabels ensures an object with matching labels is selected."
Note: This function appends passed data to existing values
obj spec.forProvider.query.destinationTable.datasetIdSelector.policy
"Policies for selection."
fn spec.forProvider.query.destinationTable.datasetIdSelector.policy.withResolution
withResolution(resolution)
"Resolution specifies whether resolution of this reference is required. The default is 'Required', which means the reconcile will fail if the reference cannot be resolved. 'Optional' means this reference will be a no-op if it cannot be resolved."
fn spec.forProvider.query.destinationTable.datasetIdSelector.policy.withResolve
withResolve(resolve)
"Resolve specifies when this reference should be resolved. The default is 'IfNotPresent', which will attempt to resolve the reference only when the corresponding field is not present. Use 'Always' to resolve the reference on every reconcile."
obj spec.forProvider.query.destinationTable.tableIdRef
"Reference to a Table in bigquery to populate tableId."
fn spec.forProvider.query.destinationTable.tableIdRef.withName
withName(name)
"Name of the referenced object."
obj spec.forProvider.query.destinationTable.tableIdRef.policy
"Policies for referencing."
fn spec.forProvider.query.destinationTable.tableIdRef.policy.withResolution
withResolution(resolution)
"Resolution specifies whether resolution of this reference is required. The default is 'Required', which means the reconcile will fail if the reference cannot be resolved. 'Optional' means this reference will be a no-op if it cannot be resolved."
fn spec.forProvider.query.destinationTable.tableIdRef.policy.withResolve
withResolve(resolve)
"Resolve specifies when this reference should be resolved. The default is 'IfNotPresent', which will attempt to resolve the reference only when the corresponding field is not present. Use 'Always' to resolve the reference on every reconcile."
obj spec.forProvider.query.destinationTable.tableIdSelector
"Selector for a Table in bigquery to populate tableId."
fn spec.forProvider.query.destinationTable.tableIdSelector.withMatchControllerRef
withMatchControllerRef(matchControllerRef)
"MatchControllerRef ensures an object with the same controller reference as the selecting object is selected."
fn spec.forProvider.query.destinationTable.tableIdSelector.withMatchLabels
withMatchLabels(matchLabels)
"MatchLabels ensures an object with matching labels is selected."
fn spec.forProvider.query.destinationTable.tableIdSelector.withMatchLabelsMixin
withMatchLabelsMixin(matchLabels)
"MatchLabels ensures an object with matching labels is selected."
Note: This function appends passed data to existing values
obj spec.forProvider.query.destinationTable.tableIdSelector.policy
"Policies for selection."
fn spec.forProvider.query.destinationTable.tableIdSelector.policy.withResolution
withResolution(resolution)
"Resolution specifies whether resolution of this reference is required. The default is 'Required', which means the reconcile will fail if the reference cannot be resolved. 'Optional' means this reference will be a no-op if it cannot be resolved."
fn spec.forProvider.query.destinationTable.tableIdSelector.policy.withResolve
withResolve(resolve)
"Resolve specifies when this reference should be resolved. The default is 'IfNotPresent', which will attempt to resolve the reference only when the corresponding field is not present. Use 'Always' to resolve the reference on every reconcile."
obj spec.forProvider.query.scriptOptions
"Options controlling the execution of scripts. Structure is documented below."
fn spec.forProvider.query.scriptOptions.withKeyResultStatement
withKeyResultStatement(keyResultStatement)
"Determines which statement in the script represents the \"key result\", used to populate the schema and query results of the script job. Possible values are: LAST, FIRST_SELECT."
fn spec.forProvider.query.scriptOptions.withStatementByteBudget
withStatementByteBudget(statementByteBudget)
"Limit on the number of bytes billed per statement. Exceeding this budget results in an error."
fn spec.forProvider.query.scriptOptions.withStatementTimeoutMs
withStatementTimeoutMs(statementTimeoutMs)
"Timeout period for each statement in a script."
obj spec.forProvider.query.userDefinedFunctionResources
"Describes user-defined function resources used in the query. Structure is documented below."
fn spec.forProvider.query.userDefinedFunctionResources.withInlineCode
withInlineCode(inlineCode)
"An inline resource that contains code for a user-defined function (UDF). Providing a inline code resource is equivalent to providing a URI for a file containing the same code."
fn spec.forProvider.query.userDefinedFunctionResources.withResourceUri
withResourceUri(resourceUri)
"A code resource to load from a Google Cloud Storage URI (gs://bucket/path)."
obj spec.initProvider
"THIS IS AN ALPHA FIELD. Do not use it in production. It is not honored unless the relevant Crossplane feature flag is enabled, and may be changed or removed without notice. InitProvider holds the same fields as ForProvider, with the exception of Identifier and other resource reference fields. The fields that are in InitProvider are merged into ForProvider when the resource is created. The same fields are also added to the terraform ignore_changes hook, to avoid updating them after creation. This is useful for fields that are required on creation, but we do not desire to update them after creation, for example because of an external controller is managing them, like an autoscaler."
fn spec.initProvider.withCopy
withCopy(copy)
"Copies a table. Structure is documented below."
fn spec.initProvider.withCopyMixin
withCopyMixin(copy)
"Copies a table. Structure is documented below."
Note: This function appends passed data to existing values
fn spec.initProvider.withExtract
withExtract(extract)
"Configures an extract job. Structure is documented below."
fn spec.initProvider.withExtractMixin
withExtractMixin(extract)
"Configures an extract job. Structure is documented below."
Note: This function appends passed data to existing values
fn spec.initProvider.withJobId
withJobId(jobId)
"The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters."
fn spec.initProvider.withJobTimeoutMs
withJobTimeoutMs(jobTimeoutMs)
"Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job."
fn spec.initProvider.withLabels
withLabels(labels)
"The labels associated with this job. You can use these to organize and group your jobs."
fn spec.initProvider.withLabelsMixin
withLabelsMixin(labels)
"The labels associated with this job. You can use these to organize and group your jobs."
Note: This function appends passed data to existing values
fn spec.initProvider.withLoad
withLoad(load)
"Configures a load job. Structure is documented below."
fn spec.initProvider.withLoadMixin
withLoadMixin(load)
"Configures a load job. Structure is documented below."
Note: This function appends passed data to existing values
fn spec.initProvider.withLocation
withLocation(location)
"The geographic location of the job. The default value is US."
fn spec.initProvider.withProject
withProject(project)
"The ID of the project in which the resource belongs. If it is not provided, the provider project is used."
fn spec.initProvider.withQuery
withQuery(query)
"Configures a query job. Structure is documented below."
fn spec.initProvider.withQueryMixin
withQueryMixin(query)
"Configures a query job. Structure is documented below."
Note: This function appends passed data to existing values
obj spec.initProvider.copy
"Copies a table. Structure is documented below."
fn spec.initProvider.copy.withCreateDisposition
withCreateDisposition(createDisposition)
"Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER."
fn spec.initProvider.copy.withDestinationEncryptionConfiguration
withDestinationEncryptionConfiguration(destinationEncryptionConfiguration)
"Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below."
fn spec.initProvider.copy.withDestinationEncryptionConfigurationMixin
withDestinationEncryptionConfigurationMixin(destinationEncryptionConfiguration)
"Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below."
Note: This function appends passed data to existing values
fn spec.initProvider.copy.withDestinationTable
withDestinationTable(destinationTable)
"The destination table. Structure is documented below."
fn spec.initProvider.copy.withDestinationTableMixin
withDestinationTableMixin(destinationTable)
"The destination table. Structure is documented below."
Note: This function appends passed data to existing values
fn spec.initProvider.copy.withSourceTables
withSourceTables(sourceTables)
"Source tables to copy. Structure is documented below."
fn spec.initProvider.copy.withSourceTablesMixin
withSourceTablesMixin(sourceTables)
"Source tables to copy. Structure is documented below."
Note: This function appends passed data to existing values
fn spec.initProvider.copy.withWriteDisposition
withWriteDisposition(writeDisposition)
"Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY."
obj spec.initProvider.copy.destinationTable
"The destination table. Structure is documented below."
fn spec.initProvider.copy.destinationTable.withProjectId
withProjectId(projectId)
"The ID of the project containing this table."
obj spec.initProvider.copy.sourceTables
"Source tables to copy. Structure is documented below."
fn spec.initProvider.copy.sourceTables.withDatasetId
withDatasetId(datasetId)
"The ID of the dataset containing this table."
fn spec.initProvider.copy.sourceTables.withProjectId
withProjectId(projectId)
"The ID of the project containing this table."
fn spec.initProvider.copy.sourceTables.withTableId
withTableId(tableId)
"The table. Can be specified {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not."
obj spec.initProvider.extract
"Configures an extract job. Structure is documented below."
fn spec.initProvider.extract.withCompression
withCompression(compression)
"The compression type to use for exported files. Possible values include GZIP, DEFLATE, SNAPPY, and NONE. The default value is NONE. DEFLATE and SNAPPY are only supported for Avro."
fn spec.initProvider.extract.withDestinationFormat
withDestinationFormat(destinationFormat)
"The exported file format. Possible values include CSV, NEWLINE_DELIMITED_JSON and AVRO for tables and SAVED_MODEL for models. The default value for tables is CSV. Tables with nested or repeated fields cannot be exported as CSV. The default value for models is SAVED_MODEL."
fn spec.initProvider.extract.withDestinationUris
withDestinationUris(destinationUris)
"A list of fully-qualified Google Cloud Storage URIs where the extracted table should be written."
fn spec.initProvider.extract.withDestinationUrisMixin
withDestinationUrisMixin(destinationUris)
"A list of fully-qualified Google Cloud Storage URIs where the extracted table should be written."
Note: This function appends passed data to existing values
fn spec.initProvider.extract.withFieldDelimiter
withFieldDelimiter(fieldDelimiter)
"When extracting data in CSV format, this defines the delimiter to use between fields in the exported data. Default is ','"
fn spec.initProvider.extract.withPrintHeader
withPrintHeader(printHeader)
"Whether to print out a header row in the results. Default is true."
fn spec.initProvider.extract.withSourceModel
withSourceModel(sourceModel)
"A reference to the model being exported. Structure is documented below."
fn spec.initProvider.extract.withSourceModelMixin
withSourceModelMixin(sourceModel)
"A reference to the model being exported. Structure is documented below."
Note: This function appends passed data to existing values
fn spec.initProvider.extract.withSourceTable
withSourceTable(sourceTable)
"A reference to the table being exported. Structure is documented below."
fn spec.initProvider.extract.withSourceTableMixin
withSourceTableMixin(sourceTable)
"A reference to the table being exported. Structure is documented below."
Note: This function appends passed data to existing values
fn spec.initProvider.extract.withUseAvroLogicalTypes
withUseAvroLogicalTypes(useAvroLogicalTypes)
"Whether to use logical types when extracting to AVRO format."
obj spec.initProvider.extract.sourceModel
"A reference to the model being exported. Structure is documented below."
fn spec.initProvider.extract.sourceModel.withDatasetId
withDatasetId(datasetId)
"The ID of the dataset containing this table."
fn spec.initProvider.extract.sourceModel.withModelId
withModelId(modelId)
"The ID of the model."
fn spec.initProvider.extract.sourceModel.withProjectId
withProjectId(projectId)
"The ID of the project containing this table."
obj spec.initProvider.extract.sourceTable
"A reference to the table being exported. Structure is documented below."
fn spec.initProvider.extract.sourceTable.withProjectId
withProjectId(projectId)
"The ID of the project containing this table."
obj spec.initProvider.load
"Configures a load job. Structure is documented below."
fn spec.initProvider.load.withAllowJaggedRows
withAllowJaggedRows(allowJaggedRows)
"Accept rows that are missing trailing optional columns. The missing values are treated as nulls. If false, records with missing trailing columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. Only applicable to CSV, ignored for other formats."
fn spec.initProvider.load.withAllowQuotedNewlines
withAllowQuotedNewlines(allowQuotedNewlines)
"Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false."
fn spec.initProvider.load.withAutodetect
withAutodetect(autodetect)
"Indicates if we should automatically infer the options and schema for CSV and JSON sources."
fn spec.initProvider.load.withCreateDisposition
withCreateDisposition(createDisposition)
"Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER."
fn spec.initProvider.load.withDestinationEncryptionConfiguration
withDestinationEncryptionConfiguration(destinationEncryptionConfiguration)
"Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below."
fn spec.initProvider.load.withDestinationEncryptionConfigurationMixin
withDestinationEncryptionConfigurationMixin(destinationEncryptionConfiguration)
"Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below."
Note: This function appends passed data to existing values
fn spec.initProvider.load.withDestinationTable
withDestinationTable(destinationTable)
"The destination table to load the data into. Structure is documented below."
fn spec.initProvider.load.withDestinationTableMixin
withDestinationTableMixin(destinationTable)
"The destination table to load the data into. Structure is documented below."
Note: This function appends passed data to existing values
fn spec.initProvider.load.withEncoding
withEncoding(encoding)
"The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties."
fn spec.initProvider.load.withFieldDelimiter
withFieldDelimiter(fieldDelimiter)
"The separator for fields in a CSV file. The separator can be any ISO-8859-1 single-byte character. To use a character in the range 128-255, you must encode the character as UTF8. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence \"\t\" to specify a tab separator. The default value is a comma (',')."
fn spec.initProvider.load.withIgnoreUnknownValues
withIgnoreUnknownValues(ignoreUnknownValues)
"Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don't match any column names"
fn spec.initProvider.load.withJsonExtension
withJsonExtension(jsonExtension)
"If sourceFormat is set to newline-delimited JSON, indicates whether it should be processed as a JSON variant such as GeoJSON. For a sourceFormat other than JSON, omit this field. If the sourceFormat is newline-delimited JSON: - for newline-delimited GeoJSON: set to GEOJSON."
fn spec.initProvider.load.withMaxBadRecords
withMaxBadRecords(maxBadRecords)
"The maximum number of bad records that BigQuery can ignore when running the job. If the number of bad records exceeds this value, an invalid error is returned in the job result. The default value is 0, which requires that all records are valid."
fn spec.initProvider.load.withNullMarker
withNullMarker(nullMarker)
"Specifies a string that represents a null value in a CSV file. For example, if you specify \"\N\", BigQuery interprets \"\N\" as a null value when loading a CSV file. The default value is the empty string. If you set this property to a custom value, BigQuery throws an error if an empty string is present for all data types except for STRING and BYTE. For STRING and BYTE columns, BigQuery interprets the empty string as an empty value."
fn spec.initProvider.load.withParquetOptions
withParquetOptions(parquetOptions)
"Parquet Options for load and make external tables. Structure is documented below."
fn spec.initProvider.load.withParquetOptionsMixin
withParquetOptionsMixin(parquetOptions)
"Parquet Options for load and make external tables. Structure is documented below."
Note: This function appends passed data to existing values
fn spec.initProvider.load.withProjectionFields
withProjectionFields(projectionFields)
"If sourceFormat is set to \"DATASTORE_BACKUP\", indicates which entity properties to load into BigQuery from a Cloud Datastore backup. Property names are case sensitive and must be top-level properties. If no properties are specified, BigQuery loads all properties. If any named property isn't found in the Cloud Datastore backup, an invalid error is returned in the job result."
fn spec.initProvider.load.withProjectionFieldsMixin
withProjectionFieldsMixin(projectionFields)
"If sourceFormat is set to \"DATASTORE_BACKUP\", indicates which entity properties to load into BigQuery from a Cloud Datastore backup. Property names are case sensitive and must be top-level properties. If no properties are specified, BigQuery loads all properties. If any named property isn't found in the Cloud Datastore backup, an invalid error is returned in the job result."
Note: This function appends passed data to existing values
fn spec.initProvider.load.withQuote
withQuote(quote)
"The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. The default value is a double-quote ('\"'). If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true."
fn spec.initProvider.load.withSchemaUpdateOptions
withSchemaUpdateOptions(schemaUpdateOptions)
"Allows the schema of the destination table to be updated as a side effect of the load job if a schema is autodetected or supplied in the job configuration. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable."
fn spec.initProvider.load.withSchemaUpdateOptionsMixin
withSchemaUpdateOptionsMixin(schemaUpdateOptions)
"Allows the schema of the destination table to be updated as a side effect of the load job if a schema is autodetected or supplied in the job configuration. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable."
Note: This function appends passed data to existing values
fn spec.initProvider.load.withSkipLeadingRows
withSkipLeadingRows(skipLeadingRows)
"The number of rows at the top of a CSV file that BigQuery will skip when loading the data. The default value is 0. This property is useful if you have header rows in the file that should be skipped. When autodetect is on, the behavior is the following: skipLeadingRows unspecified - Autodetect tries to detect headers in the first row. If they are not detected, the row is read as data. Otherwise data is read starting from the second row. skipLeadingRows is 0 - Instructs autodetect that there are no headers and data should be read starting from the first row. skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect headers in row N. If headers are not detected, row N is just skipped. Otherwise row N is used to extract column names for the detected schema."
fn spec.initProvider.load.withSourceFormat
withSourceFormat(sourceFormat)
"The format of the data files. For CSV files, specify \"CSV\". For datastore backups, specify \"DATASTORE_BACKUP\". For newline-delimited JSON, specify \"NEWLINE_DELIMITED_JSON\". For Avro, specify \"AVRO\". For parquet, specify \"PARQUET\". For orc, specify \"ORC\". [Beta] For Bigtable, specify \"BIGTABLE\". The default value is CSV."
fn spec.initProvider.load.withSourceUris
withSourceUris(sourceUris)
"The fully-qualified URIs that point to your data in Google Cloud. For Google Cloud Storage URIs: Each URI can contain one '' wildcard character and it must come after the 'bucket' name. Size limits related to load jobs apply to external data sources. For Google Cloud Bigtable URIs: Exactly one URI can be specified and it has be a fully specified and valid HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore backups: Exactly one URI can be specified. Also, the '' wildcard character is not allowed."
fn spec.initProvider.load.withSourceUrisMixin
withSourceUrisMixin(sourceUris)
"The fully-qualified URIs that point to your data in Google Cloud. For Google Cloud Storage URIs: Each URI can contain one '' wildcard character and it must come after the 'bucket' name. Size limits related to load jobs apply to external data sources. For Google Cloud Bigtable URIs: Exactly one URI can be specified and it has be a fully specified and valid HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore backups: Exactly one URI can be specified. Also, the '' wildcard character is not allowed."
Note: This function appends passed data to existing values
fn spec.initProvider.load.withTimePartitioning
withTimePartitioning(timePartitioning)
"Time-based partitioning specification for the destination table. Structure is documented below."
fn spec.initProvider.load.withTimePartitioningMixin
withTimePartitioningMixin(timePartitioning)
"Time-based partitioning specification for the destination table. Structure is documented below."
Note: This function appends passed data to existing values
fn spec.initProvider.load.withWriteDisposition
withWriteDisposition(writeDisposition)
"Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY."
obj spec.initProvider.load.destinationEncryptionConfiguration
"Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below."
fn spec.initProvider.load.destinationEncryptionConfiguration.withKmsKeyName
withKmsKeyName(kmsKeyName)
"Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key."
obj spec.initProvider.load.destinationTable
"The destination table to load the data into. Structure is documented below."
fn spec.initProvider.load.destinationTable.withProjectId
withProjectId(projectId)
"The ID of the project containing this table."
obj spec.initProvider.load.parquetOptions
"Parquet Options for load and make external tables. Structure is documented below."
fn spec.initProvider.load.parquetOptions.withEnableListInference
withEnableListInference(enableListInference)
"If sourceFormat is set to PARQUET, indicates whether to use schema inference specifically for Parquet LIST logical type."
fn spec.initProvider.load.parquetOptions.withEnumAsString
withEnumAsString(enumAsString)
"If sourceFormat is set to PARQUET, indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default."
obj spec.initProvider.load.timePartitioning
"Time-based partitioning specification for the destination table. Structure is documented below."
fn spec.initProvider.load.timePartitioning.withExpirationMs
withExpirationMs(expirationMs)
"Number of milliseconds for which to keep the storage for a partition. A wrapper is used here because 0 is an invalid value."
fn spec.initProvider.load.timePartitioning.withField
withField(field)
"If not set, the table is partitioned by pseudo column '_PARTITIONTIME'; if set, the table is partitioned by this field. The field must be a top-level TIMESTAMP or DATE field. Its mode must be NULLABLE or REQUIRED. A wrapper is used here because an empty string is an invalid value."
fn spec.initProvider.load.timePartitioning.withType
withType(type)
"The only type supported is DAY, which will generate one partition per day. Providing an empty string used to cause an error, but in OnePlatform the field will be treated as unset."
obj spec.initProvider.query
"Configures a query job. Structure is documented below."
fn spec.initProvider.query.withAllowLargeResults
withAllowLargeResults(allowLargeResults)
"If true and query uses legacy SQL dialect, allows the query to produce arbitrarily large result tables at a slight cost in performance. Requires destinationTable to be set. For standard SQL queries, this flag is ignored and large results are always allowed. However, you must still set destinationTable when result size exceeds the allowed maximum response size."
fn spec.initProvider.query.withCreateDisposition
withCreateDisposition(createDisposition)
"Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER."
fn spec.initProvider.query.withDefaultDataset
withDefaultDataset(defaultDataset)
"Specifies the default dataset to use for unqualified table names in the query. Note that this does not alter behavior of unqualified dataset names. Structure is documented below."
fn spec.initProvider.query.withDefaultDatasetMixin
withDefaultDatasetMixin(defaultDataset)
"Specifies the default dataset to use for unqualified table names in the query. Note that this does not alter behavior of unqualified dataset names. Structure is documented below."
Note: This function appends passed data to existing values
fn spec.initProvider.query.withDestinationEncryptionConfiguration
withDestinationEncryptionConfiguration(destinationEncryptionConfiguration)
"Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below."
fn spec.initProvider.query.withDestinationEncryptionConfigurationMixin
withDestinationEncryptionConfigurationMixin(destinationEncryptionConfiguration)
"Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below."
Note: This function appends passed data to existing values
fn spec.initProvider.query.withDestinationTable
withDestinationTable(destinationTable)
"Describes the table where the query results should be stored. This property must be set for large results that exceed the maximum response size. For queries that produce anonymous (cached) results, this field will be populated by BigQuery. Structure is documented below."
fn spec.initProvider.query.withDestinationTableMixin
withDestinationTableMixin(destinationTable)
"Describes the table where the query results should be stored. This property must be set for large results that exceed the maximum response size. For queries that produce anonymous (cached) results, this field will be populated by BigQuery. Structure is documented below."
Note: This function appends passed data to existing values
fn spec.initProvider.query.withFlattenResults
withFlattenResults(flattenResults)
"If true and query uses legacy SQL dialect, flattens all nested and repeated fields in the query results. allowLargeResults must be true if this is set to false. For standard SQL queries, this flag is ignored and results are never flattened."
fn spec.initProvider.query.withMaximumBillingTier
withMaximumBillingTier(maximumBillingTier)
"Limits the billing tier for this job. Queries that have resource usage beyond this tier will fail (without incurring a charge). If unspecified, this will be set to your project default."
fn spec.initProvider.query.withMaximumBytesBilled
withMaximumBytesBilled(maximumBytesBilled)
"Limits the bytes billed for this job. Queries that will have bytes billed beyond this limit will fail (without incurring a charge). If unspecified, this will be set to your project default."
fn spec.initProvider.query.withParameterMode
withParameterMode(parameterMode)
"Standard SQL only. Set to POSITIONAL to use positional (?) query parameters or to NAMED to use named (@myparam) query parameters in this query."
fn spec.initProvider.query.withPriority
withPriority(priority)
"Specifies a priority for the query. Default value is INTERACTIVE. Possible values are: INTERACTIVE, BATCH."
fn spec.initProvider.query.withQuery
withQuery(query)
"SQL query text to execute. The useLegacySql field can be used to indicate whether the query uses legacy SQL or standard SQL. NOTE: queries containing DML language (DELETE, UPDATE, MERGE, INSERT) must specify create_disposition = \"\" and write_disposition = \"\"."
fn spec.initProvider.query.withSchemaUpdateOptions
withSchemaUpdateOptions(schemaUpdateOptions)
"Allows the schema of the destination table to be updated as a side effect of the query job. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable."
fn spec.initProvider.query.withSchemaUpdateOptionsMixin
withSchemaUpdateOptionsMixin(schemaUpdateOptions)
"Allows the schema of the destination table to be updated as a side effect of the query job. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable."
Note: This function appends passed data to existing values
fn spec.initProvider.query.withScriptOptions
withScriptOptions(scriptOptions)
"Options controlling the execution of scripts. Structure is documented below."
fn spec.initProvider.query.withScriptOptionsMixin
withScriptOptionsMixin(scriptOptions)
"Options controlling the execution of scripts. Structure is documented below."
Note: This function appends passed data to existing values
fn spec.initProvider.query.withUseLegacySql
withUseLegacySql(useLegacySql)
"Specifies whether to use BigQuery's legacy SQL dialect for this query. The default value is true. If set to false, the query will use BigQuery's standard SQL."
fn spec.initProvider.query.withUseQueryCache
withUseQueryCache(useQueryCache)
"Whether to look for the result in the query cache. The query cache is a best-effort cache that will be flushed whenever tables in the query are modified. Moreover, the query cache is only available when a query does not have a destination table specified. The default value is true."
fn spec.initProvider.query.withUserDefinedFunctionResources
withUserDefinedFunctionResources(userDefinedFunctionResources)
"Describes user-defined function resources used in the query. Structure is documented below."
fn spec.initProvider.query.withUserDefinedFunctionResourcesMixin
withUserDefinedFunctionResourcesMixin(userDefinedFunctionResources)
"Describes user-defined function resources used in the query. Structure is documented below."
Note: This function appends passed data to existing values
fn spec.initProvider.query.withWriteDisposition
withWriteDisposition(writeDisposition)
"Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY."
obj spec.initProvider.query.defaultDataset
"Specifies the default dataset to use for unqualified table names in the query. Note that this does not alter behavior of unqualified dataset names. Structure is documented below."
fn spec.initProvider.query.defaultDataset.withProjectId
withProjectId(projectId)
"The ID of the project containing this table."
obj spec.initProvider.query.destinationEncryptionConfiguration
"Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below."
fn spec.initProvider.query.destinationEncryptionConfiguration.withKmsKeyName
withKmsKeyName(kmsKeyName)
"Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key."
obj spec.initProvider.query.destinationTable
"Describes the table where the query results should be stored. This property must be set for large results that exceed the maximum response size. For queries that produce anonymous (cached) results, this field will be populated by BigQuery. Structure is documented below."
fn spec.initProvider.query.destinationTable.withProjectId
withProjectId(projectId)
"The ID of the project containing this table."
obj spec.initProvider.query.scriptOptions
"Options controlling the execution of scripts. Structure is documented below."
fn spec.initProvider.query.scriptOptions.withKeyResultStatement
withKeyResultStatement(keyResultStatement)
"Determines which statement in the script represents the \"key result\", used to populate the schema and query results of the script job. Possible values are: LAST, FIRST_SELECT."
fn spec.initProvider.query.scriptOptions.withStatementByteBudget
withStatementByteBudget(statementByteBudget)
"Limit on the number of bytes billed per statement. Exceeding this budget results in an error."
fn spec.initProvider.query.scriptOptions.withStatementTimeoutMs
withStatementTimeoutMs(statementTimeoutMs)
"Timeout period for each statement in a script."
obj spec.initProvider.query.userDefinedFunctionResources
"Describes user-defined function resources used in the query. Structure is documented below."
fn spec.initProvider.query.userDefinedFunctionResources.withInlineCode
withInlineCode(inlineCode)
"An inline resource that contains code for a user-defined function (UDF). Providing a inline code resource is equivalent to providing a URI for a file containing the same code."
fn spec.initProvider.query.userDefinedFunctionResources.withResourceUri
withResourceUri(resourceUri)
"A code resource to load from a Google Cloud Storage URI (gs://bucket/path)."
obj spec.providerConfigRef
"ProviderConfigReference specifies how the provider that will be used to create, observe, update, and delete this managed resource should be configured."
fn spec.providerConfigRef.withName
withName(name)
"Name of the referenced object."
obj spec.providerConfigRef.policy
"Policies for referencing."
fn spec.providerConfigRef.policy.withResolution
withResolution(resolution)
"Resolution specifies whether resolution of this reference is required. The default is 'Required', which means the reconcile will fail if the reference cannot be resolved. 'Optional' means this reference will be a no-op if it cannot be resolved."
fn spec.providerConfigRef.policy.withResolve
withResolve(resolve)
"Resolve specifies when this reference should be resolved. The default is 'IfNotPresent', which will attempt to resolve the reference only when the corresponding field is not present. Use 'Always' to resolve the reference on every reconcile."
obj spec.providerRef
"ProviderReference specifies the provider that will be used to create, observe, update, and delete this managed resource. Deprecated: Please use ProviderConfigReference, i.e. providerConfigRef
"
fn spec.providerRef.withName
withName(name)
"Name of the referenced object."
obj spec.providerRef.policy
"Policies for referencing."
fn spec.providerRef.policy.withResolution
withResolution(resolution)
"Resolution specifies whether resolution of this reference is required. The default is 'Required', which means the reconcile will fail if the reference cannot be resolved. 'Optional' means this reference will be a no-op if it cannot be resolved."
fn spec.providerRef.policy.withResolve
withResolve(resolve)
"Resolve specifies when this reference should be resolved. The default is 'IfNotPresent', which will attempt to resolve the reference only when the corresponding field is not present. Use 'Always' to resolve the reference on every reconcile."
obj spec.publishConnectionDetailsTo
"PublishConnectionDetailsTo specifies the connection secret config which contains a name, metadata and a reference to secret store config to which any connection details for this managed resource should be written. Connection details frequently include the endpoint, username, and password required to connect to the managed resource."
fn spec.publishConnectionDetailsTo.withName
withName(name)
"Name is the name of the connection secret."
obj spec.publishConnectionDetailsTo.configRef
"SecretStoreConfigRef specifies which secret store config should be used for this ConnectionSecret."
fn spec.publishConnectionDetailsTo.configRef.withName
withName(name)
"Name of the referenced object."
obj spec.publishConnectionDetailsTo.configRef.policy
"Policies for referencing."
fn spec.publishConnectionDetailsTo.configRef.policy.withResolution
withResolution(resolution)
"Resolution specifies whether resolution of this reference is required. The default is 'Required', which means the reconcile will fail if the reference cannot be resolved. 'Optional' means this reference will be a no-op if it cannot be resolved."
fn spec.publishConnectionDetailsTo.configRef.policy.withResolve
withResolve(resolve)
"Resolve specifies when this reference should be resolved. The default is 'IfNotPresent', which will attempt to resolve the reference only when the corresponding field is not present. Use 'Always' to resolve the reference on every reconcile."
obj spec.publishConnectionDetailsTo.metadata
"Metadata is the metadata for connection secret."
fn spec.publishConnectionDetailsTo.metadata.withAnnotations
withAnnotations(annotations)
"Annotations are the annotations to be added to connection secret. - For Kubernetes secrets, this will be used as \"metadata.annotations\". - It is up to Secret Store implementation for others store types."
fn spec.publishConnectionDetailsTo.metadata.withAnnotationsMixin
withAnnotationsMixin(annotations)
"Annotations are the annotations to be added to connection secret. - For Kubernetes secrets, this will be used as \"metadata.annotations\". - It is up to Secret Store implementation for others store types."
Note: This function appends passed data to existing values
fn spec.publishConnectionDetailsTo.metadata.withLabels
withLabels(labels)
"Labels are the labels/tags to be added to connection secret. - For Kubernetes secrets, this will be used as \"metadata.labels\". - It is up to Secret Store implementation for others store types."
fn spec.publishConnectionDetailsTo.metadata.withLabelsMixin
withLabelsMixin(labels)
"Labels are the labels/tags to be added to connection secret. - For Kubernetes secrets, this will be used as \"metadata.labels\". - It is up to Secret Store implementation for others store types."
Note: This function appends passed data to existing values
fn spec.publishConnectionDetailsTo.metadata.withType
withType(type)
"Type is the SecretType for the connection secret. - Only valid for Kubernetes Secret Stores."
obj spec.writeConnectionSecretToRef
"WriteConnectionSecretToReference specifies the namespace and name of a Secret to which any connection details for this managed resource should be written. Connection details frequently include the endpoint, username, and password required to connect to the managed resource. This field is planned to be replaced in a future release in favor of PublishConnectionDetailsTo. Currently, both could be set independently and connection details would be published to both without affecting each other."
fn spec.writeConnectionSecretToRef.withName
withName(name)
"Name of the secret."
fn spec.writeConnectionSecretToRef.withNamespace
withNamespace(namespace)
"Namespace of the secret."