Google Cloud v8.23.0 published on Monday, Mar 24, 2025 by Pulumi

gcp.biglake.Table


Represents a table in a BigLake Metastore database.

To get more information about Table, see:

  • The BigLake REST API documentation
  • How-to guides: Manage open source metadata with BigLake Metastore

Example Usage

Biglake Table

import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

const catalog = new gcp.biglake.Catalog("catalog", {
    name: "my_catalog",
    location: "US",
});
const bucket = new gcp.storage.Bucket("bucket", {
    name: "my_bucket",
    location: "US",
    forceDestroy: true,
    uniformBucketLevelAccess: true,
});
const metadataFolder = new gcp.storage.BucketObject("metadata_folder", {
    name: "metadata/",
    content: " ",
    bucket: bucket.name,
});
const dataFolder = new gcp.storage.BucketObject("data_folder", {
    name: "data/",
    content: " ",
    bucket: bucket.name,
});
const database = new gcp.biglake.Database("database", {
    name: "my_database",
    catalog: catalog.id,
    type: "HIVE",
    hiveOptions: {
        locationUri: pulumi.interpolate`gs://${bucket.name}/${metadataFolder.name}`,
        parameters: {
            owner: "Alex",
        },
    },
});
const table = new gcp.biglake.Table("table", {
    name: "my_table",
    database: database.id,
    type: "HIVE",
    hiveOptions: {
        tableType: "MANAGED_TABLE",
        storageDescriptor: {
            locationUri: pulumi.interpolate`gs://${bucket.name}/${dataFolder.name}`,
            inputFormat: "org.apache.hadoop.mapred.SequenceFileInputFormat",
            outputFormat: "org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat",
        },
        parameters: {
            "spark.sql.create.version": "3.1.3",
            "spark.sql.sources.schema.numParts": "1",
            transient_lastDdlTime: "1680894197",
            "spark.sql.partitionProvider": "catalog",
            owner: "John Doe",
            "spark.sql.sources.schema.part.0": "{\"type\":\"struct\",\"fields\":[{\"name\":\"id\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}},{\"name\":\"name\",\"type\":\"string\",\"nullable\":true,\"metadata\":{}},{\"name\":\"age\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}}]}",
            "spark.sql.sources.provider": "iceberg",
            provider: "iceberg",
        },
    },
});
import pulumi
import pulumi_gcp as gcp

catalog = gcp.biglake.Catalog("catalog",
    name="my_catalog",
    location="US")
bucket = gcp.storage.Bucket("bucket",
    name="my_bucket",
    location="US",
    force_destroy=True,
    uniform_bucket_level_access=True)
metadata_folder = gcp.storage.BucketObject("metadata_folder",
    name="metadata/",
    content=" ",
    bucket=bucket.name)
data_folder = gcp.storage.BucketObject("data_folder",
    name="data/",
    content=" ",
    bucket=bucket.name)
database = gcp.biglake.Database("database",
    name="my_database",
    catalog=catalog.id,
    type="HIVE",
    hive_options={
        "location_uri": pulumi.Output.all(
            bucketName=bucket.name,
            metadataFolderName=metadata_folder.name
).apply(lambda resolved_outputs: f"gs://{resolved_outputs['bucketName']}/{resolved_outputs['metadataFolderName']}")
,
        "parameters": {
            "owner": "Alex",
        },
    })
table = gcp.biglake.Table("table",
    name="my_table",
    database=database.id,
    type="HIVE",
    hive_options={
        "table_type": "MANAGED_TABLE",
        "storage_descriptor": {
            "location_uri": pulumi.Output.all(
                bucketName=bucket.name,
                dataFolderName=data_folder.name
).apply(lambda resolved_outputs: f"gs://{resolved_outputs['bucketName']}/{resolved_outputs['dataFolderName']}")
,
            "input_format": "org.apache.hadoop.mapred.SequenceFileInputFormat",
            "output_format": "org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat",
        },
        "parameters": {
            "spark.sql.create.version": "3.1.3",
            "spark.sql.sources.schema.numParts": "1",
            "transient_lastDdlTime": "1680894197",
            "spark.sql.partitionProvider": "catalog",
            "owner": "John Doe",
            "spark.sql.sources.schema.part.0": "{\"type\":\"struct\",\"fields\":[{\"name\":\"id\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}},{\"name\":\"name\",\"type\":\"string\",\"nullable\":true,\"metadata\":{}},{\"name\":\"age\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}}]}",
            "spark.sql.sources.provider": "iceberg",
            "provider": "iceberg",
        },
    })
package main

import (
	"fmt"

	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/biglake"
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/storage"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		catalog, err := biglake.NewCatalog(ctx, "catalog", &biglake.CatalogArgs{
			Name:     pulumi.String("my_catalog"),
			Location: pulumi.String("US"),
		})
		if err != nil {
			return err
		}
		bucket, err := storage.NewBucket(ctx, "bucket", &storage.BucketArgs{
			Name:                     pulumi.String("my_bucket"),
			Location:                 pulumi.String("US"),
			ForceDestroy:             pulumi.Bool(true),
			UniformBucketLevelAccess: pulumi.Bool(true),
		})
		if err != nil {
			return err
		}
		metadataFolder, err := storage.NewBucketObject(ctx, "metadata_folder", &storage.BucketObjectArgs{
			Name:    pulumi.String("metadata/"),
			Content: pulumi.String(" "),
			Bucket:  bucket.Name,
		})
		if err != nil {
			return err
		}
		dataFolder, err := storage.NewBucketObject(ctx, "data_folder", &storage.BucketObjectArgs{
			Name:    pulumi.String("data/"),
			Content: pulumi.String(" "),
			Bucket:  bucket.Name,
		})
		if err != nil {
			return err
		}
		database, err := biglake.NewDatabase(ctx, "database", &biglake.DatabaseArgs{
			Name:    pulumi.String("my_database"),
			Catalog: catalog.ID(),
			Type:    pulumi.String("HIVE"),
			HiveOptions: &biglake.DatabaseHiveOptionsArgs{
				LocationUri: pulumi.All(bucket.Name, metadataFolder.Name).ApplyT(func(_args []interface{}) (string, error) {
					bucketName := _args[0].(string)
					metadataFolderName := _args[1].(string)
					return fmt.Sprintf("gs://%v/%v", bucketName, metadataFolderName), nil
				}).(pulumi.StringOutput),
				Parameters: pulumi.StringMap{
					"owner": pulumi.String("Alex"),
				},
			},
		})
		if err != nil {
			return err
		}
		_, err = biglake.NewTable(ctx, "table", &biglake.TableArgs{
			Name:     pulumi.String("my_table"),
			Database: database.ID(),
			Type:     pulumi.String("HIVE"),
			HiveOptions: &biglake.TableHiveOptionsArgs{
				TableType: pulumi.String("MANAGED_TABLE"),
				StorageDescriptor: &biglake.TableHiveOptionsStorageDescriptorArgs{
					LocationUri: pulumi.All(bucket.Name, dataFolder.Name).ApplyT(func(_args []interface{}) (string, error) {
						bucketName := _args[0].(string)
						dataFolderName := _args[1].(string)
						return fmt.Sprintf("gs://%v/%v", bucketName, dataFolderName), nil
					}).(pulumi.StringOutput),
					InputFormat:  pulumi.String("org.apache.hadoop.mapred.SequenceFileInputFormat"),
					OutputFormat: pulumi.String("org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat"),
				},
				Parameters: pulumi.StringMap{
					"spark.sql.create.version":          pulumi.String("3.1.3"),
					"spark.sql.sources.schema.numParts": pulumi.String("1"),
					"transient_lastDdlTime":             pulumi.String("1680894197"),
					"spark.sql.partitionProvider":       pulumi.String("catalog"),
					"owner":                             pulumi.String("John Doe"),
					"spark.sql.sources.schema.part.0":   pulumi.String("{\"type\":\"struct\",\"fields\":[{\"name\":\"id\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}},{\"name\":\"name\",\"type\":\"string\",\"nullable\":true,\"metadata\":{}},{\"name\":\"age\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}}]}"),
					"spark.sql.sources.provider":        pulumi.String("iceberg"),
					"provider":                          pulumi.String("iceberg"),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() => 
{
    var catalog = new Gcp.BigLake.Catalog("catalog", new()
    {
        Name = "my_catalog",
        Location = "US",
    });

    var bucket = new Gcp.Storage.Bucket("bucket", new()
    {
        Name = "my_bucket",
        Location = "US",
        ForceDestroy = true,
        UniformBucketLevelAccess = true,
    });

    var metadataFolder = new Gcp.Storage.BucketObject("metadata_folder", new()
    {
        Name = "metadata/",
        Content = " ",
        Bucket = bucket.Name,
    });

    var dataFolder = new Gcp.Storage.BucketObject("data_folder", new()
    {
        Name = "data/",
        Content = " ",
        Bucket = bucket.Name,
    });

    var database = new Gcp.BigLake.Database("database", new()
    {
        Name = "my_database",
        Catalog = catalog.Id,
        Type = "HIVE",
        HiveOptions = new Gcp.BigLake.Inputs.DatabaseHiveOptionsArgs
        {
            LocationUri = Output.Tuple(bucket.Name, metadataFolder.Name).Apply(values =>
            {
                var bucketName = values.Item1;
                var metadataFolderName = values.Item2;
                return $"gs://{bucketName}/{metadataFolderName}";
            }),
            Parameters = 
            {
                { "owner", "Alex" },
            },
        },
    });

    var table = new Gcp.BigLake.Table("table", new()
    {
        Name = "my_table",
        Database = database.Id,
        Type = "HIVE",
        HiveOptions = new Gcp.BigLake.Inputs.TableHiveOptionsArgs
        {
            TableType = "MANAGED_TABLE",
            StorageDescriptor = new Gcp.BigLake.Inputs.TableHiveOptionsStorageDescriptorArgs
            {
                LocationUri = Output.Tuple(bucket.Name, dataFolder.Name).Apply(values =>
                {
                    var bucketName = values.Item1;
                    var dataFolderName = values.Item2;
                    return $"gs://{bucketName}/{dataFolderName}";
                }),
                InputFormat = "org.apache.hadoop.mapred.SequenceFileInputFormat",
                OutputFormat = "org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat",
            },
            Parameters = 
            {
                { "spark.sql.create.version", "3.1.3" },
                { "spark.sql.sources.schema.numParts", "1" },
                { "transient_lastDdlTime", "1680894197" },
                { "spark.sql.partitionProvider", "catalog" },
                { "owner", "John Doe" },
                { "spark.sql.sources.schema.part.0", "{\"type\":\"struct\",\"fields\":[{\"name\":\"id\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}},{\"name\":\"name\",\"type\":\"string\",\"nullable\":true,\"metadata\":{}},{\"name\":\"age\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}}]}" },
                { "spark.sql.sources.provider", "iceberg" },
                { "provider", "iceberg" },
            },
        },
    });

});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.biglake.Catalog;
import com.pulumi.gcp.biglake.CatalogArgs;
import com.pulumi.gcp.storage.Bucket;
import com.pulumi.gcp.storage.BucketArgs;
import com.pulumi.gcp.storage.BucketObject;
import com.pulumi.gcp.storage.BucketObjectArgs;
import com.pulumi.gcp.biglake.Database;
import com.pulumi.gcp.biglake.DatabaseArgs;
import com.pulumi.gcp.biglake.inputs.DatabaseHiveOptionsArgs;
import com.pulumi.gcp.biglake.Table;
import com.pulumi.gcp.biglake.TableArgs;
import com.pulumi.gcp.biglake.inputs.TableHiveOptionsArgs;
import com.pulumi.gcp.biglake.inputs.TableHiveOptionsStorageDescriptorArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var catalog = new Catalog("catalog", CatalogArgs.builder()
            .name("my_catalog")
            .location("US")
            .build());

        var bucket = new Bucket("bucket", BucketArgs.builder()
            .name("my_bucket")
            .location("US")
            .forceDestroy(true)
            .uniformBucketLevelAccess(true)
            .build());

        var metadataFolder = new BucketObject("metadataFolder", BucketObjectArgs.builder()
            .name("metadata/")
            .content(" ")
            .bucket(bucket.name())
            .build());

        var dataFolder = new BucketObject("dataFolder", BucketObjectArgs.builder()
            .name("data/")
            .content(" ")
            .bucket(bucket.name())
            .build());

        var database = new Database("database", DatabaseArgs.builder()
            .name("my_database")
            .catalog(catalog.id())
            .type("HIVE")
            .hiveOptions(DatabaseHiveOptionsArgs.builder()
                .locationUri(Output.tuple(bucket.name(), metadataFolder.name()).applyValue(values -> {
                    var bucketName = values.t1;
                    var metadataFolderName = values.t2;
                    return String.format("gs://%s/%s", bucketName, metadataFolderName);
                }))
                .parameters(Map.of("owner", "Alex"))
                .build())
            .build());

        var table = new Table("table", TableArgs.builder()
            .name("my_table")
            .database(database.id())
            .type("HIVE")
            .hiveOptions(TableHiveOptionsArgs.builder()
                .tableType("MANAGED_TABLE")
                .storageDescriptor(TableHiveOptionsStorageDescriptorArgs.builder()
                    .locationUri(Output.tuple(bucket.name(), dataFolder.name()).applyValue(values -> {
                        var bucketName = values.t1;
                        var dataFolderName = values.t2;
                        return String.format("gs://%s/%s", bucketName, dataFolderName);
                    }))
                    .inputFormat("org.apache.hadoop.mapred.SequenceFileInputFormat")
                    .outputFormat("org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat")
                    .build())
                .parameters(Map.ofEntries(
                    Map.entry("spark.sql.create.version", "3.1.3"),
                    Map.entry("spark.sql.sources.schema.numParts", "1"),
                    Map.entry("transient_lastDdlTime", "1680894197"),
                    Map.entry("spark.sql.partitionProvider", "catalog"),
                    Map.entry("owner", "John Doe"),
                    Map.entry("spark.sql.sources.schema.part.0", "{\"type\":\"struct\",\"fields\":[{\"name\":\"id\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}},{\"name\":\"name\",\"type\":\"string\",\"nullable\":true,\"metadata\":{}},{\"name\":\"age\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}}]}"),
                    Map.entry("spark.sql.sources.provider", "iceberg"),
                    Map.entry("provider", "iceberg")
                ))
                .build())
            .build());

    }
}
resources:
  catalog:
    type: gcp:biglake:Catalog
    properties:
      name: my_catalog
      location: US
  bucket:
    type: gcp:storage:Bucket
    properties:
      name: my_bucket
      location: US
      forceDestroy: true
      uniformBucketLevelAccess: true
  metadataFolder:
    type: gcp:storage:BucketObject
    name: metadata_folder
    properties:
      name: metadata/
      content: ' '
      bucket: ${bucket.name}
  dataFolder:
    type: gcp:storage:BucketObject
    name: data_folder
    properties:
      name: data/
      content: ' '
      bucket: ${bucket.name}
  database:
    type: gcp:biglake:Database
    properties:
      name: my_database
      catalog: ${catalog.id}
      type: HIVE
      hiveOptions:
        locationUri: gs://${bucket.name}/${metadataFolder.name}
        parameters:
          owner: Alex
  table:
    type: gcp:biglake:Table
    properties:
      name: my_table
      database: ${database.id}
      type: HIVE
      hiveOptions:
        tableType: MANAGED_TABLE
        storageDescriptor:
          locationUri: gs://${bucket.name}/${dataFolder.name}
          inputFormat: org.apache.hadoop.mapred.SequenceFileInputFormat
          outputFormat: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
        parameters:
          spark.sql.create.version: 3.1.3
          spark.sql.sources.schema.numParts: '1'
          transient_lastDdlTime: '1680894197'
          spark.sql.partitionProvider: catalog
          owner: John Doe
          spark.sql.sources.schema.part.0: '{"type":"struct","fields":[{"name":"id","type":"integer","nullable":true,"metadata":{}},{"name":"name","type":"string","nullable":true,"metadata":{}},{"name":"age","type":"integer","nullable":true,"metadata":{}}]}'
          spark.sql.sources.provider: iceberg
          provider: iceberg

Create Table Resource

Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

Constructor syntax

new Table(name: string, args?: TableArgs, opts?: CustomResourceOptions);
@overload
def Table(resource_name: str,
          args: Optional[TableArgs] = None,
          opts: Optional[ResourceOptions] = None)

@overload
def Table(resource_name: str,
          opts: Optional[ResourceOptions] = None,
          database: Optional[str] = None,
          hive_options: Optional[TableHiveOptionsArgs] = None,
          name: Optional[str] = None,
          type: Optional[str] = None)
func NewTable(ctx *Context, name string, args *TableArgs, opts ...ResourceOption) (*Table, error)
public Table(string name, TableArgs? args = null, CustomResourceOptions? opts = null)
public Table(String name, TableArgs args)
public Table(String name, TableArgs args, CustomResourceOptions options)
type: gcp:biglake:Table
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.

Parameters

name This property is required. string
The unique name of the resource.
args TableArgs
The arguments to resource properties.
opts CustomResourceOptions
Bag of options to control resource's behavior.
resource_name This property is required. str
The unique name of the resource.
args TableArgs
The arguments to resource properties.
opts ResourceOptions
Bag of options to control resource's behavior.
ctx Context
Context object for the current deployment.
name This property is required. string
The unique name of the resource.
args TableArgs
The arguments to resource properties.
opts ResourceOption
Bag of options to control resource's behavior.
name This property is required. string
The unique name of the resource.
args TableArgs
The arguments to resource properties.
opts CustomResourceOptions
Bag of options to control resource's behavior.
name This property is required. String
The unique name of the resource.
args This property is required. TableArgs
The arguments to resource properties.
options CustomResourceOptions
Bag of options to control resource's behavior.

Constructor example

The following reference example uses placeholder values for all input properties.

var tableResource = new Gcp.BigLake.Table("tableResource", new()
{
    Database = "string",
    HiveOptions = new Gcp.BigLake.Inputs.TableHiveOptionsArgs
    {
        Parameters = 
        {
            { "string", "string" },
        },
        StorageDescriptor = new Gcp.BigLake.Inputs.TableHiveOptionsStorageDescriptorArgs
        {
            InputFormat = "string",
            LocationUri = "string",
            OutputFormat = "string",
        },
        TableType = "string",
    },
    Name = "string",
    Type = "string",
});
example, err := biglake.NewTable(ctx, "tableResource", &biglake.TableArgs{
	Database: pulumi.String("string"),
	HiveOptions: &biglake.TableHiveOptionsArgs{
		Parameters: pulumi.StringMap{
			"string": pulumi.String("string"),
		},
		StorageDescriptor: &biglake.TableHiveOptionsStorageDescriptorArgs{
			InputFormat:  pulumi.String("string"),
			LocationUri:  pulumi.String("string"),
			OutputFormat: pulumi.String("string"),
		},
		TableType: pulumi.String("string"),
	},
	Name: pulumi.String("string"),
	Type: pulumi.String("string"),
})
var tableResource = new Table("tableResource", TableArgs.builder()
    .database("string")
    .hiveOptions(TableHiveOptionsArgs.builder()
        .parameters(Map.of("string", "string"))
        .storageDescriptor(TableHiveOptionsStorageDescriptorArgs.builder()
            .inputFormat("string")
            .locationUri("string")
            .outputFormat("string")
            .build())
        .tableType("string")
        .build())
    .name("string")
    .type("string")
    .build());
table_resource = gcp.biglake.Table("tableResource",
    database="string",
    hive_options={
        "parameters": {
            "string": "string",
        },
        "storage_descriptor": {
            "input_format": "string",
            "location_uri": "string",
            "output_format": "string",
        },
        "table_type": "string",
    },
    name="string",
    type="string")
const tableResource = new gcp.biglake.Table("tableResource", {
    database: "string",
    hiveOptions: {
        parameters: {
            string: "string",
        },
        storageDescriptor: {
            inputFormat: "string",
            locationUri: "string",
            outputFormat: "string",
        },
        tableType: "string",
    },
    name: "string",
    type: "string",
});
type: gcp:biglake:Table
properties:
    database: string
    hiveOptions:
        parameters:
            string: string
        storageDescriptor:
            inputFormat: string
            locationUri: string
            outputFormat: string
        tableType: string
    name: string
    type: string

Table Resource Properties

To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

Inputs

In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
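For example, a minimal sketch of the two equivalent Python forms (the resource names, database ID, and bucket URI below are placeholders):

import pulumi_gcp as gcp

# Dictionary-literal form, as used in the examples above.
table_a = gcp.biglake.Table("table-a",
    database="my-database-id",
    type="HIVE",
    hive_options={
        "table_type": "MANAGED_TABLE",
        "storage_descriptor": {
            "location_uri": "gs://my_bucket/data/",
        },
    })

# Equivalent argument-class form.
table_b = gcp.biglake.Table("table-b",
    database="my-database-id",
    type="HIVE",
    hive_options=gcp.biglake.TableHiveOptionsArgs(
        table_type="MANAGED_TABLE",
        storage_descriptor=gcp.biglake.TableHiveOptionsStorageDescriptorArgs(
            location_uri="gs://my_bucket/data/",
        ),
    ))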

The Table resource accepts the following input properties:

Database Changes to this property will trigger replacement. string
The id of the parent database.
HiveOptions TableHiveOptions
Options of a Hive table. Structure is documented below.
Name Changes to this property will trigger replacement. string
Output only. The name of the Table. Format: projects/{project_id_or_number}/locations/{locationId}/catalogs/{catalogId}/databases/{databaseId}/tables/{tableId}


Type string
The table type. Possible values are: HIVE.
Database Changes to this property will trigger replacement. string
The id of the parent database.
HiveOptions TableHiveOptionsArgs
Options of a Hive table. Structure is documented below.
Name Changes to this property will trigger replacement. string
Output only. The name of the Table. Format: projects/{project_id_or_number}/locations/{locationId}/catalogs/{catalogId}/databases/{databaseId}/tables/{tableId}


Type string
The table type. Possible values are: HIVE.
database Changes to this property will trigger replacement. String
The id of the parent database.
hiveOptions TableHiveOptions
Options of a Hive table. Structure is documented below.
name Changes to this property will trigger replacement. String
Output only. The name of the Table. Format: projects/{project_id_or_number}/locations/{locationId}/catalogs/{catalogId}/databases/{databaseId}/tables/{tableId}


type String
The table type. Possible values are: HIVE.
database Changes to this property will trigger replacement. string
The id of the parent database.
hiveOptions TableHiveOptions
Options of a Hive table. Structure is documented below.
name Changes to this property will trigger replacement. string
Output only. The name of the Table. Format: projects/{project_id_or_number}/locations/{locationId}/catalogs/{catalogId}/databases/{databaseId}/tables/{tableId}


type string
The table type. Possible values are: HIVE.
database Changes to this property will trigger replacement. str
The id of the parent database.
hive_options TableHiveOptionsArgs
Options of a Hive table. Structure is documented below.
name Changes to this property will trigger replacement. str
Output only. The name of the Table. Format: projects/{project_id_or_number}/locations/{locationId}/catalogs/{catalogId}/databases/{databaseId}/tables/{tableId}


type str
The table type. Possible values are: HIVE.
database Changes to this property will trigger replacement. String
The id of the parent database.
hiveOptions Property Map
Options of a Hive table. Structure is documented below.
name Changes to this property will trigger replacement. String
Output only. The name of the Table. Format: projects/{project_id_or_number}/locations/{locationId}/catalogs/{catalogId}/databases/{databaseId}/tables/{tableId}


type String
The table type. Possible values are: HIVE.

Outputs

All input properties are implicitly available as output properties. Additionally, the Table resource produces the following output properties:
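For instance, a minimal Python sketch that reads one of these outputs (table refers to the resource created in the example above):

import pulumi

# Export the server-assigned creation timestamp of the table.
pulumi.export("table_create_time", table.create_time)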

CreateTime string
Output only. The creation time of the table. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
DeleteTime string
Output only. The deletion time of the table. Only set after the table is deleted. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
Etag string
The checksum of a table object computed by the server based on the value of other fields. It may be sent on update requests to ensure the client has an up-to-date value before proceeding. It is only checked for update table operations.
ExpireTime string
Output only. The time when this table is considered expired. Only set after the table is deleted. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
Id string
The provider-assigned unique ID for this managed resource.
UpdateTime string
Output only. The last modification time of the table. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
CreateTime string
Output only. The creation time of the table. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
DeleteTime string
Output only. The deletion time of the table. Only set after the table is deleted. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
Etag string
The checksum of a table object computed by the server based on the value of other fields. It may be sent on update requests to ensure the client has an up-to-date value before proceeding. It is only checked for update table operations.
ExpireTime string
Output only. The time when this table is considered expired. Only set after the table is deleted. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
Id string
The provider-assigned unique ID for this managed resource.
UpdateTime string
Output only. The last modification time of the table. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
createTime String
Output only. The creation time of the table. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
deleteTime String
Output only. The deletion time of the table. Only set after the table is deleted. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
etag String
The checksum of a table object computed by the server based on the value of other fields. It may be sent on update requests to ensure the client has an up-to-date value before proceeding. It is only checked for update table operations.
expireTime String
Output only. The time when this table is considered expired. Only set after the table is deleted. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
id String
The provider-assigned unique ID for this managed resource.
updateTime String
Output only. The last modification time of the table. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
createTime string
Output only. The creation time of the table. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
deleteTime string
Output only. The deletion time of the table. Only set after the table is deleted. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
etag string
The checksum of a table object computed by the server based on the value of other fields. It may be sent on update requests to ensure the client has an up-to-date value before proceeding. It is only checked for update table operations.
expireTime string
Output only. The time when this table is considered expired. Only set after the table is deleted. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
id string
The provider-assigned unique ID for this managed resource.
updateTime string
Output only. The last modification time of the table. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
create_time str
Output only. The creation time of the table. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
delete_time str
Output only. The deletion time of the table. Only set after the table is deleted. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
etag str
The checksum of a table object computed by the server based on the value of other fields. It may be sent on update requests to ensure the client has an up-to-date value before proceeding. It is only checked for update table operations.
expire_time str
Output only. The time when this table is considered expired. Only set after the table is deleted. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
id str
The provider-assigned unique ID for this managed resource.
update_time str
Output only. The last modification time of the table. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
createTime String
Output only. The creation time of the table. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
deleteTime String
Output only. The deletion time of the table. Only set after the table is deleted. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
etag String
The checksum of a table object computed by the server based on the value of other fields. It may be sent on update requests to ensure the client has an up-to-date value before proceeding. It is only checked for update table operations.
expireTime String
Output only. The time when this table is considered expired. Only set after the table is deleted. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
id String
The provider-assigned unique ID for this managed resource.
updateTime String
Output only. The last modification time of the table. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".

Look up Existing Table Resource

Get an existing Table resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

public static get(name: string, id: Input<ID>, state?: TableState, opts?: CustomResourceOptions): Table
@staticmethod
def get(resource_name: str,
        id: str,
        opts: Optional[ResourceOptions] = None,
        create_time: Optional[str] = None,
        database: Optional[str] = None,
        delete_time: Optional[str] = None,
        etag: Optional[str] = None,
        expire_time: Optional[str] = None,
        hive_options: Optional[TableHiveOptionsArgs] = None,
        name: Optional[str] = None,
        type: Optional[str] = None,
        update_time: Optional[str] = None) -> Table
func GetTable(ctx *Context, name string, id IDInput, state *TableState, opts ...ResourceOption) (*Table, error)
public static Table Get(string name, Input<string> id, TableState? state, CustomResourceOptions? opts = null)
public static Table get(String name, Output<String> id, TableState state, CustomResourceOptions options)
resources:
  _:
    type: gcp:biglake:Table
    get:
      id: ${id}
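As a minimal Python sketch, a lookup of an existing table by its fully qualified resource ID (the project and resource names below are hypothetical placeholders):

import pulumi_gcp as gcp

# Look up an existing BigLake table; no new resource is created.
existing = gcp.biglake.Table.get("existing-table",
    "projects/my-project/locations/US/catalogs/my_catalog/databases/my_database/tables/my_table")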
name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
resource_name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to lookup.
name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
The following state arguments are supported:
CreateTime string
Output only. The creation time of the table. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
Database Changes to this property will trigger replacement. string
The id of the parent database.
DeleteTime string
Output only. The deletion time of the table. Only set after the table is deleted. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
Etag string
The checksum of a table object computed by the server based on the value of other fields. It may be sent on update requests to ensure the client has an up-to-date value before proceeding. It is only checked for update table operations.
ExpireTime string
Output only. The time when this table is considered expired. Only set after the table is deleted. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
HiveOptions TableHiveOptions
Options of a Hive table. Structure is documented below.
Name Changes to this property will trigger replacement. string
Output only. The name of the Table. Format: projects/{project_id_or_number}/locations/{locationId}/catalogs/{catalogId}/databases/{databaseId}/tables/{tableId}


Type string
The table type. Possible values are: HIVE.
UpdateTime string
Output only. The last modification time of the table. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
CreateTime string
Output only. The creation time of the table. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
Database Changes to this property will trigger replacement. string
The id of the parent database.
DeleteTime string
Output only. The deletion time of the table. Only set after the table is deleted. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
Etag string
The checksum of a table object computed by the server based on the value of other fields. It may be sent on update requests to ensure the client has an up-to-date value before proceeding. It is only checked for update table operations.
ExpireTime string
Output only. The time when this table is considered expired. Only set after the table is deleted. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
HiveOptions TableHiveOptionsArgs
Options of a Hive table. Structure is documented below.
Name Changes to this property will trigger replacement. string
Output only. The name of the Table. Format: projects/{project_id_or_number}/locations/{locationId}/catalogs/{catalogId}/databases/{databaseId}/tables/{tableId}


Type string
The table type. Possible values are: HIVE.
UpdateTime string
Output only. The last modification time of the table. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
createTime String
Output only. The creation time of the table. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
database Changes to this property will trigger replacement. String
The id of the parent database.
deleteTime String
Output only. The deletion time of the table. Only set after the table is deleted. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
etag String
The checksum of a table object computed by the server based on the value of other fields. It may be sent on update requests to ensure the client has an up-to-date value before proceeding. It is only checked for update table operations.
expireTime String
Output only. The time when this table is considered expired. Only set after the table is deleted. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
hiveOptions TableHiveOptions
Options of a Hive table. Structure is documented below.
name Changes to this property will trigger replacement. String
Output only. The name of the Table. Format: projects/{project_id_or_number}/locations/{locationId}/catalogs/{catalogId}/databases/{databaseId}/tables/{tableId}


type String
The table type. Possible values are: HIVE.
updateTime String
Output only. The last modification time of the table. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
createTime string
Output only. The creation time of the table. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
database Changes to this property will trigger replacement. string
The id of the parent database.
deleteTime string
Output only. The deletion time of the table. Only set after the table is deleted. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
etag string
The checksum of a table object computed by the server based on the value of other fields. It may be sent on update requests to ensure the client has an up-to-date value before proceeding. It is only checked for update table operations.
expireTime string
Output only. The time when this table is considered expired. Only set after the table is deleted. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
hiveOptions TableHiveOptions
Options of a Hive table. Structure is documented below.
name Changes to this property will trigger replacement. string
Output only. The name of the Table. Format: projects/{project_id_or_number}/locations/{locationId}/catalogs/{catalogId}/databases/{databaseId}/tables/{tableId}


type string
The table type. Possible values are: HIVE.
updateTime string
Output only. The last modification time of the table. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
create_time str
Output only. The creation time of the table. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
database Changes to this property will trigger replacement. str
The id of the parent database.
delete_time str
Output only. The deletion time of the table. Only set after the table is deleted. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
etag str
The checksum of a table object computed by the server based on the value of other fields. It may be sent on update requests to ensure the client has an up-to-date value before proceeding. It is only checked for update table operations.
expire_time str
Output only. The time when this table is considered expired. Only set after the table is deleted. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
hive_options TableHiveOptionsArgs
Options of a Hive table. Structure is documented below.
name Changes to this property will trigger replacement. str
Output only. The name of the Table. Format: projects/{project_id_or_number}/locations/{locationId}/catalogs/{catalogId}/databases/{databaseId}/tables/{tableId}


type str
The table type. Possible values are: HIVE.
update_time str
Output only. The last modification time of the table. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
createTime String
Output only. The creation time of the table. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
database Changes to this property will trigger replacement. String
The id of the parent database.
deleteTime String
Output only. The deletion time of the table. Only set after the table is deleted. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
etag String
The checksum of a table object computed by the server based on the value of other fields. It may be sent on update requests to ensure the client has an up-to-date value before proceeding. It is only checked for update table operations.
expireTime String
Output only. The time when this table is considered expired. Only set after the table is deleted. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
hiveOptions Property Map
Options of a Hive table. Structure is documented below.
name Changes to this property will trigger replacement. String
Output only. The name of the Table. Format: projects/{project_id_or_number}/locations/{locationId}/catalogs/{catalogId}/databases/{databaseId}/tables/{tableId}


type String
The table type. Possible values are: HIVE.
updateTime String
Output only. The last modification time of the table. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".

Supporting Types

TableHiveOptions
, TableHiveOptionsArgs

Parameters Dictionary<string, string>
Stores user supplied Hive table parameters. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
StorageDescriptor TableHiveOptionsStorageDescriptor
Stores physical storage information on the data. Structure is documented below.
TableType string
Hive table type. For example, MANAGED_TABLE, EXTERNAL_TABLE.
Parameters map[string]string
Stores user supplied Hive table parameters. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
StorageDescriptor TableHiveOptionsStorageDescriptor
Stores physical storage information on the data. Structure is documented below.
TableType string
Hive table type. For example, MANAGED_TABLE, EXTERNAL_TABLE.
parameters Map<String,String>
Stores user supplied Hive table parameters. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
storageDescriptor TableHiveOptionsStorageDescriptor
Stores physical storage information on the data. Structure is documented below.
tableType String
Hive table type. For example, MANAGED_TABLE, EXTERNAL_TABLE.
parameters {[key: string]: string}
Stores user supplied Hive table parameters. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
storageDescriptor TableHiveOptionsStorageDescriptor
Stores physical storage information on the data. Structure is documented below.
tableType string
Hive table type. For example, MANAGED_TABLE, EXTERNAL_TABLE.
parameters Mapping[str, str]
Stores user supplied Hive table parameters. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
storage_descriptor TableHiveOptionsStorageDescriptor
Stores physical storage information on the data. Structure is documented below.
table_type str
Hive table type. For example, MANAGED_TABLE, EXTERNAL_TABLE.
parameters Map<String>
Stores user supplied Hive table parameters. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
storageDescriptor Property Map
Stores physical storage information on the data. Structure is documented below.
tableType String
Hive table type. For example, MANAGED_TABLE, EXTERNAL_TABLE.

TableHiveOptionsStorageDescriptor
, TableHiveOptionsStorageDescriptorArgs

InputFormat string
The fully qualified Java class name of the input format.
LocationUri string
Cloud Storage folder URI where the table data is stored, starting with "gs://".
OutputFormat string
The fully qualified Java class name of the output format.
InputFormat string
The fully qualified Java class name of the input format.
LocationUri string
Cloud Storage folder URI where the table data is stored, starting with "gs://".
OutputFormat string
The fully qualified Java class name of the output format.
inputFormat String
The fully qualified Java class name of the input format.
locationUri String
Cloud Storage folder URI where the table data is stored, starting with "gs://".
outputFormat String
The fully qualified Java class name of the output format.
inputFormat string
The fully qualified Java class name of the input format.
locationUri string
Cloud Storage folder URI where the table data is stored, starting with "gs://".
outputFormat string
The fully qualified Java class name of the output format.
input_format str
The fully qualified Java class name of the input format.
location_uri str
Cloud Storage folder URI where the table data is stored, starting with "gs://".
output_format str
The fully qualified Java class name of the output format.
inputFormat String
The fully qualified Java class name of the input format.
locationUri String
Cloud Storage folder URI where the table data is stored, starting with "gs://".
outputFormat String
The fully qualified Java class name of the output format.

Import

Table can be imported using the following format:

  • {{database}}/tables/{{name}}

When using the pulumi import command, Table can be imported using the format above. For example:

$ pulumi import gcp:biglake/table:Table default {{database}}/tables/{{name}}
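Here {{database}} is the full resource ID of the parent database, so a concrete command looks like the following (the project and resource names are hypothetical placeholders):

$ pulumi import gcp:biglake/table:Table default projects/my-project/locations/US/catalogs/my_catalog/databases/my_database/tables/my_table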

To learn more about importing existing cloud resources, see Importing resources.

Package Details

Repository
Google Cloud (GCP) Classic pulumi/pulumi-gcp
License
Apache-2.0
Notes
This Pulumi package is based on the google-beta Terraform Provider.