From 27044d52e6bb6b4b6dbc746a0cfb02951817d7f1 Mon Sep 17 00:00:00 2001 From: Alvaro Viebrantz Date: Wed, 12 Feb 2025 10:13:09 -0400 Subject: [PATCH] fix: avoid schema field mutation when passing selectedFields opt (#1437) --- src/bigquery.ts | 6 +- src/table.ts | 10 +-- src/types.d.ts | 169 +++++++----------------------------------------- test/table.ts | 50 ++++++++++++++ 4 files changed, 85 insertions(+), 150 deletions(-) diff --git a/src/bigquery.ts b/src/bigquery.ts index 238b34c3..392a069c 100644 --- a/src/bigquery.ts +++ b/src/bigquery.ts @@ -588,6 +588,8 @@ export class BigQuery extends Service { parseJSON?: boolean; } ) { + // copy schema fields to avoid mutation when filtering selected fields + let schemaFields = schema.fields ? [...schema.fields] : []; if (options.selectedFields && options.selectedFields!.length > 0) { const selectedFieldsArray = options.selectedFields!.map(c => { return c.split('.'); @@ -595,7 +597,7 @@ export class BigQuery extends Service { const currentFields = selectedFieldsArray.map(c => c.shift()); //filter schema fields based on selected fields. - schema.fields = schema.fields?.filter( + schemaFields = schemaFields.filter( field => currentFields .map(c => c!.toLowerCase()) @@ -610,7 +612,7 @@ export class BigQuery extends Service { function mergeSchema(row: TableRow) { return row.f!.map((field: TableRowField, index: number) => { - const schemaField = schema.fields![index]; + const schemaField = schemaFields[index]; let value = field.v; if (schemaField.mode === 'REPEATED') { value = (value as TableRowField[]).map(val => { diff --git a/src/table.ts b/src/table.ts index dd30eca2..c9f5ffb9 100644 --- a/src/table.ts +++ b/src/table.ts @@ -1830,6 +1830,10 @@ class Table extends ServiceObject { delete options.wrapIntegers; const parseJSON = options.parseJSON ? options.parseJSON : false; delete options.parseJSON; + const selectedFields = options.selectedFields + ? options.selectedFields.split(',') + : []; + delete options.selectedFields; const onComplete = ( err: Error | null, rows: TableRow[] | null, @@ -1841,10 +1845,8 @@ class Table extends ServiceObject { return; } rows = BigQuery.mergeSchemaWithRows_(this.metadata.schema, rows || [], { - wrapIntegers: wrapIntegers, - selectedFields: options.selectedFields - ? options.selectedFields!.split(',') - : [], + wrapIntegers, + selectedFields, parseJSON, }); callback!(null, rows, nextQuery, resp); diff --git a/src/types.d.ts b/src/types.d.ts index e20cc0a0..f765ffd7 100644 --- a/src/types.d.ts +++ b/src/types.d.ts @@ -13,7 +13,7 @@ // limitations under the License. /** - * Discovery Revision: 20250128 + * Discovery Revision: 20241013 */ /** @@ -77,7 +77,7 @@ declare namespace bigquery { */ argumentKind?: 'ARGUMENT_KIND_UNSPECIFIED' | 'FIXED_TYPE' | 'ANY_TYPE'; /** - * Set if argument_kind == FIXED_TYPE. + * Required unless argument_kind = ANY_TYPE. */ dataType?: IStandardSqlDataType; /** @@ -354,20 +354,6 @@ declare namespace bigquery { useAvroLogicalTypes?: boolean; }; - /** - * Request message for the BatchDeleteRowAccessPoliciesRequest method. - */ - type IBatchDeleteRowAccessPoliciesRequest = { - /** - * If set to true, it deletes the row access policy even if it's the last row access policy on the table and the deletion will widen the access rather narrowing it. - */ - force?: boolean; - /** - * Required. Policy IDs of the row access policies. - */ - policyIds?: Array; - }; - /** * Reason why BI Engine didn't accelerate the query (or sub-query). 
*/ @@ -421,19 +407,19 @@ declare namespace bigquery { */ type IBigLakeConfiguration = { /** - * Optional. The connection specifying the credentials to be used to read and write to external storage, such as Cloud Storage. The connection_id can have the form `{project}.{location}.{connection_id}` or `projects/{project}/locations/{location}/connections/{connection_id}". + * Required. The connection specifying the credentials to be used to read and write to external storage, such as Cloud Storage. The connection_id can have the form `{project}.{location}.{connection_id}` or `projects/{project}/locations/{location}/connections/{connection_id}". */ connectionId?: string; /** - * Optional. The file format the table data is stored in. + * Required. The file format the table data is stored in. */ fileFormat?: 'FILE_FORMAT_UNSPECIFIED' | 'PARQUET'; /** - * Optional. The fully qualified location prefix of the external folder where table data is stored. The '*' wildcard character is not allowed. The URI should be in the format `gs://bucket/path_to_table/` + * Required. The fully qualified location prefix of the external folder where table data is stored. The '*' wildcard character is not allowed. The URI should be in the format `gs://bucket/path_to_table/` */ storageUri?: string; /** - * Optional. The table format the metadata only snapshots are stored in. + * Required. The table format the metadata only snapshots are stored in. */ tableFormat?: 'TABLE_FORMAT_UNSPECIFIED' | 'ICEBERG'; }; @@ -740,7 +726,7 @@ declare namespace bigquery { */ type IClustering = { /** - * One or more fields on which data should be clustered. Only top-level, non-repeated, simple-type fields are supported. The ordering of the clustering fields should be prioritized from most to least important for filtering purposes. For additional information, see [Introduction to clustered tables](https://cloud.google.com/bigquery/docs/clustered-tables#limitations). + * One or more fields on which data should be clustered. Only top-level, non-repeated, simple-type fields are supported. The ordering of the clustering fields should be prioritized from most to least important for filtering purposes. Additional information on limitations can be found here: https://cloud.google.com/bigquery/docs/creating-clustered-tables#limitations */ fields?: Array; }; @@ -1539,7 +1525,7 @@ declare namespace bigquery { }; /** - * Options defining open source compatible datasets living in the BigQuery catalog. Contains metadata of open source database, schema, or namespace represented by the current dataset. + * Options defining open source compatible datasets living in the BigQuery catalog. Contains metadata of open source database, schema or namespace represented by the current dataset. */ type IExternalCatalogDatasetOptions = { /** @@ -1547,21 +1533,21 @@ declare namespace bigquery { */ defaultStorageLocationUri?: string; /** - * Optional. A map of key value pairs defining the parameters and properties of the open source schema. Maximum size of 2MiB. + * Optional. A map of key value pairs defining the parameters and properties of the open source schema. Maximum size of 2Mib. */ parameters?: {[key: string]: string}; }; /** - * Metadata about open source compatible table. The fields contained in these options correspond to Hive metastore's table-level properties. + * Metadata about open source compatible table. The fields contained in these options correspond to hive metastore's table level properties. 
*/ type IExternalCatalogTableOptions = { /** - * Optional. A connection ID that specifies the credentials to be used to read external storage, such as Azure Blob, Cloud Storage, or Amazon S3. This connection is needed to read the open source table from BigQuery. The connection_id format must be either `..` or `projects//locations//connections/`. + * Optional. The connection specifying the credentials to be used to read external storage, such as Azure Blob, Cloud Storage, or S3. The connection is needed to read the open source table from BigQuery Engine. The connection_id can have the form `..` or `projects//locations//connections/`. */ connectionId?: string; /** - * Optional. A map of the key-value pairs defining the parameters and properties of the open source table. Corresponds with Hive metastore table parameters. Maximum size of 4MiB. + * Optional. A map of key value pairs defining the parameters and properties of the open source table. Corresponds with hive meta store table parameters. Maximum size of 4Mib. */ parameters?: {[key: string]: string}; /** @@ -2618,10 +2604,6 @@ declare namespace bigquery { * Optional. Specifies the action that occurs if the destination table already exists. The following values are supported: * WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the data, removes the constraints, and uses the schema from the query result. * WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. * WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. The default value is WRITE_EMPTY. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. */ writeDisposition?: string; - /** - * Optional. This is only supported for a SELECT query using a temporary table. If set, the query is allowed to write results incrementally to the temporary result table. This may incur a performance penalty. This option cannot be used with Legacy SQL. This feature is not yet available. - */ - writeIncrementalResults?: boolean; }; /** @@ -3024,7 +3006,7 @@ declare namespace bigquery { */ sparkStatistics?: ISparkStatistics; /** - * Output only. The type of query statement, if valid. Possible values: * `SELECT`: [`SELECT`](https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#select_list) statement. * `ASSERT`: [`ASSERT`](https://cloud.google.com/bigquery/docs/reference/standard-sql/debugging-statements#assert) statement. * `INSERT`: [`INSERT`](https://cloud.google.com/bigquery/docs/reference/standard-sql/dml-syntax#insert_statement) statement. * `UPDATE`: [`UPDATE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/dml-syntax#update_statement) statement. * `DELETE`: [`DELETE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-manipulation-language) statement. * `MERGE`: [`MERGE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-manipulation-language) statement. * `CREATE_TABLE`: [`CREATE TABLE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_table_statement) statement, without `AS SELECT`. * `CREATE_TABLE_AS_SELECT`: [`CREATE TABLE AS SELECT`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_table_statement) statement. 
* `CREATE_VIEW`: [`CREATE VIEW`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_view_statement) statement. * `CREATE_MODEL`: [`CREATE MODEL`](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-create#create_model_statement) statement. * `CREATE_MATERIALIZED_VIEW`: [`CREATE MATERIALIZED VIEW`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_materialized_view_statement) statement. * `CREATE_FUNCTION`: [`CREATE FUNCTION`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_function_statement) statement. * `CREATE_TABLE_FUNCTION`: [`CREATE TABLE FUNCTION`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_table_function_statement) statement. * `CREATE_PROCEDURE`: [`CREATE PROCEDURE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_procedure) statement. * `CREATE_ROW_ACCESS_POLICY`: [`CREATE ROW ACCESS POLICY`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_row_access_policy_statement) statement. * `CREATE_SCHEMA`: [`CREATE SCHEMA`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_schema_statement) statement. * `CREATE_SNAPSHOT_TABLE`: [`CREATE SNAPSHOT TABLE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_snapshot_table_statement) statement. * `CREATE_SEARCH_INDEX`: [`CREATE SEARCH INDEX`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_search_index_statement) statement. * `DROP_TABLE`: [`DROP TABLE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_table_statement) statement. * `DROP_EXTERNAL_TABLE`: [`DROP EXTERNAL TABLE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_external_table_statement) statement. * `DROP_VIEW`: [`DROP VIEW`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_view_statement) statement. * `DROP_MODEL`: [`DROP MODEL`](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-drop-model) statement. * `DROP_MATERIALIZED_VIEW`: [`DROP MATERIALIZED VIEW`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_materialized_view_statement) statement. * `DROP_FUNCTION` : [`DROP FUNCTION`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_function_statement) statement. * `DROP_TABLE_FUNCTION` : [`DROP TABLE FUNCTION`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_table_function) statement. * `DROP_PROCEDURE`: [`DROP PROCEDURE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_procedure_statement) statement. * `DROP_SEARCH_INDEX`: [`DROP SEARCH INDEX`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_search_index) statement. * `DROP_SCHEMA`: [`DROP SCHEMA`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_schema_statement) statement. * `DROP_SNAPSHOT_TABLE`: [`DROP SNAPSHOT TABLE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_snapshot_table_statement) statement. 
* `DROP_ROW_ACCESS_POLICY`: [`DROP [ALL] ROW ACCESS POLICY|POLICIES`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_row_access_policy_statement) statement. * `ALTER_TABLE`: [`ALTER TABLE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_table_set_options_statement) statement. * `ALTER_VIEW`: [`ALTER VIEW`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_view_set_options_statement) statement. * `ALTER_MATERIALIZED_VIEW`: [`ALTER MATERIALIZED VIEW`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_materialized_view_set_options_statement) statement. * `ALTER_SCHEMA`: [`ALTER SCHEMA`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_schema_set_options_statement) statement. * `SCRIPT`: [`SCRIPT`](https://cloud.google.com/bigquery/docs/reference/standard-sql/procedural-language). * `TRUNCATE_TABLE`: [`TRUNCATE TABLE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/dml-syntax#truncate_table_statement) statement. * `CREATE_EXTERNAL_TABLE`: [`CREATE EXTERNAL TABLE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_external_table_statement) statement. * `EXPORT_DATA`: [`EXPORT DATA`](https://cloud.google.com/bigquery/docs/reference/standard-sql/other-statements#export_data_statement) statement. * `EXPORT_MODEL`: [`EXPORT MODEL`](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-export-model) statement. * `LOAD_DATA`: [`LOAD DATA`](https://cloud.google.com/bigquery/docs/reference/standard-sql/other-statements#load_data_statement) statement. * `CALL`: [`CALL`](https://cloud.google.com/bigquery/docs/reference/standard-sql/procedural-language#call) statement. + * Output only. The type of query statement, if valid. Possible values: * `SELECT`: [`SELECT`](https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#select_list) statement. * `ASSERT`: [`ASSERT`](https://cloud.google.com/bigquery/docs/reference/standard-sql/debugging-statements#assert) statement. * `INSERT`: [`INSERT`](https://cloud.google.com/bigquery/docs/reference/standard-sql/dml-syntax#insert_statement) statement. * `UPDATE`: [`UPDATE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#update_statement) statement. * `DELETE`: [`DELETE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-manipulation-language) statement. * `MERGE`: [`MERGE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-manipulation-language) statement. * `CREATE_TABLE`: [`CREATE TABLE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_table_statement) statement, without `AS SELECT`. * `CREATE_TABLE_AS_SELECT`: [`CREATE TABLE AS SELECT`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#query_statement) statement. * `CREATE_VIEW`: [`CREATE VIEW`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_view_statement) statement. * `CREATE_MODEL`: [`CREATE MODEL`](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-create#create_model_statement) statement. 
* `CREATE_MATERIALIZED_VIEW`: [`CREATE MATERIALIZED VIEW`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_materialized_view_statement) statement. * `CREATE_FUNCTION`: [`CREATE FUNCTION`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_function_statement) statement. * `CREATE_TABLE_FUNCTION`: [`CREATE TABLE FUNCTION`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_table_function_statement) statement. * `CREATE_PROCEDURE`: [`CREATE PROCEDURE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_procedure) statement. * `CREATE_ROW_ACCESS_POLICY`: [`CREATE ROW ACCESS POLICY`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_row_access_policy_statement) statement. * `CREATE_SCHEMA`: [`CREATE SCHEMA`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_schema_statement) statement. * `CREATE_SNAPSHOT_TABLE`: [`CREATE SNAPSHOT TABLE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_snapshot_table_statement) statement. * `CREATE_SEARCH_INDEX`: [`CREATE SEARCH INDEX`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_search_index_statement) statement. * `DROP_TABLE`: [`DROP TABLE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_table_statement) statement. * `DROP_EXTERNAL_TABLE`: [`DROP EXTERNAL TABLE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_external_table_statement) statement. * `DROP_VIEW`: [`DROP VIEW`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_view_statement) statement. * `DROP_MODEL`: [`DROP MODEL`](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-drop-model) statement. * `DROP_MATERIALIZED_VIEW`: [`DROP MATERIALIZED VIEW`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_materialized_view_statement) statement. * `DROP_FUNCTION` : [`DROP FUNCTION`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_function_statement) statement. * `DROP_TABLE_FUNCTION` : [`DROP TABLE FUNCTION`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_table_function) statement. * `DROP_PROCEDURE`: [`DROP PROCEDURE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_procedure_statement) statement. * `DROP_SEARCH_INDEX`: [`DROP SEARCH INDEX`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_search_index) statement. * `DROP_SCHEMA`: [`DROP SCHEMA`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_schema_statement) statement. * `DROP_SNAPSHOT_TABLE`: [`DROP SNAPSHOT TABLE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_snapshot_table_statement) statement. * `DROP_ROW_ACCESS_POLICY`: [`DROP [ALL] ROW ACCESS POLICY|POLICIES`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_row_access_policy_statement) statement. 
* `ALTER_TABLE`: [`ALTER TABLE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_table_set_options_statement) statement. * `ALTER_VIEW`: [`ALTER VIEW`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_view_set_options_statement) statement. * `ALTER_MATERIALIZED_VIEW`: [`ALTER MATERIALIZED VIEW`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_materialized_view_set_options_statement) statement. * `ALTER_SCHEMA`: [`ALTER SCHEMA`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#aalter_schema_set_options_statement) statement. * `SCRIPT`: [`SCRIPT`](https://cloud.google.com/bigquery/docs/reference/standard-sql/procedural-language). * `TRUNCATE_TABLE`: [`TRUNCATE TABLE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/dml-syntax#truncate_table_statement) statement. * `CREATE_EXTERNAL_TABLE`: [`CREATE EXTERNAL TABLE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_external_table_statement) statement. * `EXPORT_DATA`: [`EXPORT DATA`](https://cloud.google.com/bigquery/docs/reference/standard-sql/other-statements#export_data_statement) statement. * `EXPORT_MODEL`: [`EXPORT MODEL`](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-export-model) statement. * `LOAD_DATA`: [`LOAD DATA`](https://cloud.google.com/bigquery/docs/reference/standard-sql/other-statements#load_data_statement) statement. * `CALL`: [`CALL`](https://cloud.google.com/bigquery/docs/reference/standard-sql/procedural-language#call) statement. */ statementType?: string; /** @@ -3319,7 +3301,7 @@ declare namespace bigquery { */ type IMaterializedViewDefinition = { /** - * Optional. This option declares the intention to construct a materialized view that isn't refreshed incrementally. Non-incremental materialized views support an expanded range of SQL queries. The `allow_non_incremental_definition` option can't be changed after the materialized view is created. + * Optional. This option declares the intention to construct a materialized view that isn't refreshed incrementally. */ allowNonIncrementalDefinition?: boolean; /** @@ -3917,7 +3899,7 @@ declare namespace bigquery { */ labels?: {[key: string]: string}; /** - * The geographic location where the job should run. For more information, see how to [specify locations](https://cloud.google.com/bigquery/docs/locations#specify_locations). + * The geographic location where the job should run. See details at https://cloud.google.com/bigquery/docs/locations#specifying_your_location. */ location?: string; /** @@ -3960,10 +3942,6 @@ declare namespace bigquery { * Optional. Whether to look for the result in the query cache. The query cache is a best-effort cache that will be flushed whenever tables in the query are modified. The default value is true. */ useQueryCache?: boolean; - /** - * Optional. This is only supported for SELECT query. If set, the query is allowed to write results incrementally to the temporary result table. This may incur a performance penalty. This option cannot be used with Legacy SQL. This feature is not yet available. - */ - writeIncrementalResults?: boolean; }; type IQueryResponse = { @@ -3971,18 +3949,10 @@ declare namespace bigquery { * Whether the query result was fetched from the query cache. */ cacheHit?: boolean; - /** - * Output only. Creation time of this query, in milliseconds since the epoch. 
This field will be present on all queries. - */ - creationTime?: string; /** * Output only. Detailed statistics for DML statements INSERT, UPDATE, DELETE, MERGE or TRUNCATE. */ dmlStats?: IDmlStatistics; - /** - * Output only. End time of this query, in milliseconds since the epoch. This field will be present whenever a query job is in the DONE state. - */ - endTime?: string; /** * Output only. The first errors or warnings encountered during the running of the job. The final message includes the number of errors that caused the process to stop. Errors here do not necessarily mean that the job has completed or was unsuccessful. For more information about error messages, see [Error messages](https://cloud.google.com/bigquery/docs/error-messages). */ @@ -4003,10 +3973,6 @@ declare namespace bigquery { * The resource type. */ kind?: string; - /** - * Output only. The geographic location of the query. For more information about BigQuery locations, see: https://cloud.google.com/bigquery/docs/locations - */ - location?: string; /** * Output only. The number of rows affected by a DML statement. Present only for DML statements INSERT, UPDATE or DELETE. */ @@ -4031,14 +3997,6 @@ declare namespace bigquery { * Output only. Information of the session if this job is part of one. */ sessionInfo?: ISessionInfo; - /** - * Output only. Start time of this query, in milliseconds since the epoch. This field will be present when the query job transitions from the PENDING state to either RUNNING or DONE. - */ - startTime?: string; - /** - * Output only. If the project is configured to use on-demand pricing, then this field contains the total bytes billed for the job. If the project is configured to use flat-rate pricing, then you are not billed for bytes and this field is informational only. - */ - totalBytesBilled?: string; /** * The total number of bytes processed for this query. If this query was a dry run, this is the number of bytes that would be processed if the query were run. */ @@ -4047,10 +4005,6 @@ declare namespace bigquery { * The total number of rows in the complete query result set, which can be more than the number of rows in this single page of results. */ totalRows?: string; - /** - * Output only. Number of slot ms the user is actually billed for. - */ - totalSlotMs?: string; }; /** @@ -4373,10 +4327,6 @@ declare namespace bigquery { * Required. A SQL boolean expression that represents the rows defined by this row access policy, similar to the boolean expression in a WHERE clause of a SELECT query on a table. References to other tables, routines, and temporary functions are not supported. Examples: region="EU" date_field = CAST('2019-9-27' as DATE) nullable_field is not NULL numeric_field BETWEEN 1.0 AND 5.0 */ filterPredicate?: string; - /** - * Optional. Input only. The optional list of iam_member users or groups that specifies the initial members that the row-level access policy should be created with. grantees types: - "user:alice@example.com": An email address that represents a specific Google account. - "serviceAccount:my-other-app@appspot.gserviceaccount.com": An email address that represents a service account. - "group:admins@example.com": An email address that represents a Google group. - "domain:example.com":The Google Workspace domain (primary) that represents all the users of that domain. - "allAuthenticatedUsers": A special identifier that represents all service accounts and all users on the internet who have authenticated with a Google Account. 
This identifier includes accounts that aren't connected to a Google Workspace or Cloud Identity domain, such as personal Gmail accounts. Users who aren't authenticated, such as anonymous visitors, aren't included. - "allUsers":A special identifier that represents anyone who is on the internet, including authenticated and unauthenticated users. Because BigQuery requires authentication before a user can access the service, allUsers includes only authenticated users. - */ - grantees?: Array; /** * Output only. The time when this row access policy was last modified, in milliseconds since the epoch. */ @@ -4798,49 +4748,6 @@ declare namespace bigquery { serdeInfo?: ISerDeInfo; }; - /** - * If the stored column was not used, explain why. - */ - type IStoredColumnsUnusedReason = { - /** - * Specifies the high-level reason for the unused scenario, each reason must have a code associated. - */ - code?: - | 'CODE_UNSPECIFIED' - | 'STORED_COLUMNS_COVER_INSUFFICIENT' - | 'BASE_TABLE_HAS_RLS' - | 'BASE_TABLE_HAS_CLS' - | 'UNSUPPORTED_PREFILTER' - | 'INTERNAL_ERROR' - | 'OTHER_REASON'; - /** - * Specifies the detailed description for the scenario. - */ - message?: string; - /** - * Specifies which columns were not covered by the stored columns for the specified code up to 20 columns. This is populated when the code is STORED_COLUMNS_COVER_INSUFFICIENT and BASE_TABLE_HAS_CLS. - */ - uncoveredColumns?: Array; - }; - - /** - * Indicates the stored columns usage in the query. - */ - type IStoredColumnsUsage = { - /** - * Specifies the base table. - */ - baseTable?: ITableReference; - /** - * Specifies whether the query was accelerated with stored columns. - */ - isQueryAccelerated?: boolean; - /** - * If stored columns were not used, explain why. - */ - storedColumnsUnusedReasons?: Array; - }; - type IStreamingbuffer = { /** * Output only. A lower-bound estimate of the number of bytes currently in the streaming buffer. @@ -4956,10 +4863,6 @@ declare namespace bigquery { * Output only. The geographic location where the table resides. This value is inherited from the dataset. */ location?: string; - /** - * Optional. If set, overrides the default managed table type configured in the dataset. - */ - managedTableType?: 'MANAGED_TABLE_TYPE_UNSPECIFIED' | 'NATIVE' | 'ICEBERG'; /** * Optional. The materialized view definition. */ @@ -5615,7 +5518,7 @@ declare namespace bigquery { */ colsampleBytree?: number; /** - * The contribution metric. Applies to contribution analysis models. Allowed formats supported are for summable and summable ratio contribution metrics. These include expressions such as `SUM(x)` or `SUM(x)/SUM(y)`, where x and y are column names from the base table. + * The contribution metric. Applies to contribution analysis models. Allowed formats supported are for summable and summable ratio contribution metrics. These include expressions such as "SUM(x)" or "SUM(x)/SUM(y)", where x and y are column names from the base table. */ contributionMetric?: string; /** @@ -5636,7 +5539,7 @@ declare namespace bigquery { | 'HOURLY' | 'PER_MINUTE'; /** - * The column to split data with. This column won't be used as a feature. 1. When data_split_method is CUSTOM, the corresponding column should be boolean. The rows with true value tag are eval data, and the false are training data. 2. When data_split_method is SEQ, the first DATA_SPLIT_EVAL_FRACTION rows (from smallest to largest) in the corresponding column are used as training data, and the rest are eval data. 
It respects the order in Orderable data types: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#data_type_properties + * The column to split data with. This column won't be used as a feature. 1. When data_split_method is CUSTOM, the corresponding column should be boolean. The rows with true value tag are eval data, and the false are training data. 2. When data_split_method is SEQ, the first DATA_SPLIT_EVAL_FRACTION rows (from smallest to largest) in the corresponding column are used as training data, and the rest are eval data. It respects the order in Orderable data types: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#data-type-properties */ dataSplitColumn?: string; /** @@ -5685,14 +5588,6 @@ declare namespace bigquery { * Whether the model should include intercept during model training. */ fitIntercept?: boolean; - /** - * The forecast limit lower bound that was used during ARIMA model training with limits. To see more details of the algorithm: https://otexts.com/fpp2/limits.html - */ - forecastLimitLowerBound?: number; - /** - * The forecast limit upper bound that was used during ARIMA model training with limits. - */ - forecastLimitUpperBound?: number; /** * Hidden units for dnn models. */ @@ -6217,10 +6112,6 @@ declare namespace bigquery { | 'UNUSED' | 'PARTIALLY_USED' | 'FULLY_USED'; - /** - * Specifies the usage of stored columns in the query when stored columns are used in the query. - */ - storedColumnsUsages?: Array; }; /** @@ -6269,7 +6160,7 @@ declare namespace bigquery { */ type IGetParams = { /** - * Optional. The version of the access policy schema to fetch. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for conditional access policy binding in datasets must specify version 3. Dataset with no conditional role bindings in access policy may specify any valid value or leave the field unset. This field will be mapped to [IAM Policy version] (https://cloud.google.com/iam/docs/policies#versions) and will be used to fetch policy from IAM. If unset or if 0 or 1 value is used for dataset with conditional bindings, access entry with condition will have role string appended by 'withcond' string followed by a hash value. For example : { "access": [ { "role": "roles/bigquery.dataViewer_with_conditionalbinding_7a34awqsda", "userByEmail": "user@example.com", } ] } Please refer https://cloud.google.com/iam/docs/troubleshooting-withcond for more details. + * Optional. The version of the access policy schema to fetch. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for conditional access policy binding in datasets must specify version 3. Dataset with no conditional role bindings in access policy may specify any valid value or leave the field unset. This field will be maped to [IAM Policy version] (https://cloud.google.com/iam/docs/policies#versions) and will be used to fetch policy from IAM. If unset or if 0 or 1 value is used for dataset with conditional bindings, access entry with condition will have role string appended by 'withcond' string followed by a hash value. For example : { "access": [ { "role": "roles/bigquery.dataViewer_with_conditionalbinding_7a34awqsda", "userByEmail": "user@example.com", } ] } Please refer https://cloud.google.com/iam/docs/troubleshooting-withcond for more details. */ accessPolicyVersion?: number; /** @@ -6283,7 +6174,7 @@ declare namespace bigquery { */ type IInsertParams = { /** - * Optional. 
The version of the provided access policy schema. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. This version refers to the schema version of the access policy and not the version of access policy. This field's value can be equal or more than the access policy schema provided in the request. For example, * Requests with conditional access policy binding in datasets must specify version 3. * But dataset with no conditional role bindings in access policy may specify any valid value or leave the field unset. If unset or if 0 or 1 value is used for dataset with conditional bindings, request will be rejected. This field will be mapped to IAM Policy version (https://cloud.google.com/iam/docs/policies#versions) and will be used to set policy in IAM. + * Optional. The version of the provided access policy schema. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. This version refers to the schema version of the access policy and not the version of access policy. This field's value can be equal or more than the access policy schema provided in the request. For example, * Requests with conditional access policy binding in datasets must specify version 3. * But dataset with no conditional role bindings in access policy may specify any valid value or leave the field unset. If unset or if 0 or 1 value is used for dataset with conditional bindings, request will be rejected. This field will be maped to IAM Policy version (https://cloud.google.com/iam/docs/policies#versions) and will be used to set policy in IAM. */ accessPolicyVersion?: number; }; @@ -6315,7 +6206,7 @@ declare namespace bigquery { */ type IPatchParams = { /** - * Optional. The version of the provided access policy schema. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. This version refers to the schema version of the access policy and not the version of access policy. This field's value can be equal or more than the access policy schema provided in the request. For example, * Operations updating conditional access policy binding in datasets must specify version 3. Some of the operations are : - Adding a new access policy entry with condition. - Removing an access policy entry with condition. - Updating an access policy entry with condition. * But dataset with no conditional role bindings in access policy may specify any valid value or leave the field unset. If unset or if 0 or 1 value is used for dataset with conditional bindings, request will be rejected. This field will be mapped to IAM Policy version (https://cloud.google.com/iam/docs/policies#versions) and will be used to set policy in IAM. + * Optional. The version of the provided access policy schema. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. This version refers to the schema version of the access policy and not the version of access policy. This field's value can be equal or more than the access policy schema provided in the request. For example, * Operations updating conditional access policy binding in datasets must specify version 3. Some of the operations are : - Adding a new access policy entry with condition. - Removing an access policy entry with condition. - Updating an access policy entry with condition. * But dataset with no conditional role bindings in access policy may specify any valid value or leave the field unset. If unset or if 0 or 1 value is used for dataset with conditional bindings, request will be rejected. 
This field will be maped to IAM Policy version (https://cloud.google.com/iam/docs/policies#versions) and will be used to set policy in IAM. */ accessPolicyVersion?: number; }; @@ -6325,7 +6216,7 @@ declare namespace bigquery { */ type IUpdateParams = { /** - * Optional. The version of the provided access policy schema. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. This version refers to the schema version of the access policy and not the version of access policy. This field's value can be equal or more than the access policy schema provided in the request. For example, * Operations updating conditional access policy binding in datasets must specify version 3. Some of the operations are : - Adding a new access policy entry with condition. - Removing an access policy entry with condition. - Updating an access policy entry with condition. * But dataset with no conditional role bindings in access policy may specify any valid value or leave the field unset. If unset or if 0 or 1 value is used for dataset with conditional bindings, request will be rejected. This field will be mapped to IAM Policy version (https://cloud.google.com/iam/docs/policies#versions) and will be used to set policy in IAM. + * Optional. The version of the provided access policy schema. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. This version refers to the schema version of the access policy and not the version of access policy. This field's value can be equal or more than the access policy schema provided in the request. For example, * Operations updating conditional access policy binding in datasets must specify version 3. Some of the operations are : - Adding a new access policy entry with condition. - Removing an access policy entry with condition. - Updating an access policy entry with condition. * But dataset with no conditional role bindings in access policy may specify any valid value or leave the field unset. If unset or if 0 or 1 value is used for dataset with conditional bindings, request will be rejected. This field will be maped to IAM Policy version (https://cloud.google.com/iam/docs/policies#versions) and will be used to set policy in IAM. */ accessPolicyVersion?: number; }; @@ -6337,7 +6228,7 @@ declare namespace bigquery { */ type ICancelParams = { /** - * The geographic location of the job. You must [specify the location](https://cloud.google.com/bigquery/docs/locations#specify_locations) to run the job for the following scenarios: * If the location to run a job is not in the `us` or the `eu` multi-regional location * If the job's location is in a single region (for example, `us-central1`) + * The geographic location of the job. You must specify the location to run the job for the following scenarios: * If the location to run a job is not in the `us` or the `eu` multi-regional location * If the job's location is in a single region (for example, `us-central1`) For more information, see https://cloud.google.com/bigquery/docs/locations#specifying_your_location. */ location?: string; }; @@ -6347,7 +6238,7 @@ declare namespace bigquery { */ type IDeleteParams = { /** - * The geographic location of the job. Required. For more information, see how to [specify locations](https://cloud.google.com/bigquery/docs/locations#specify_locations). + * The geographic location of the job. Required. See details at: https://cloud.google.com/bigquery/docs/locations#specifying_your_location. 
*/ location?: string; }; @@ -6357,7 +6248,7 @@ declare namespace bigquery { */ type IGetParams = { /** - * The geographic location of the job. You must specify the location to run the job for the following scenarios: * If the location to run a job is not in the `us` or the `eu` multi-regional location * If the job's location is in a single region (for example, `us-central1`) For more information, see how to [specify locations](https://cloud.google.com/bigquery/docs/locations#specify_locations). + * The geographic location of the job. You must specify the location to run the job for the following scenarios: * If the location to run a job is not in the `us` or the `eu` multi-regional location * If the job's location is in a single region (for example, `us-central1`) For more information, see https://cloud.google.com/bigquery/docs/locations#specifying_your_location. */ location?: string; }; @@ -6371,7 +6262,7 @@ declare namespace bigquery { */ 'formatOptions.useInt64Timestamp'?: boolean; /** - * The geographic location of the job. You must specify the location to run the job for the following scenarios: * If the location to run a job is not in the `us` or the `eu` multi-regional location * If the job's location is in a single region (for example, `us-central1`) For more information, see how to [specify locations](https://cloud.google.com/bigquery/docs/locations#specify_locations). + * The geographic location of the job. You must specify the location to run the job for the following scenarios: * If the location to run a job is not in the `us` or the `eu` multi-regional location * If the job's location is in a single region (for example, `us-central1`) For more information, see https://cloud.google.com/bigquery/docs/locations#specifying_your_location. */ location?: string; /** @@ -6498,16 +6389,6 @@ declare namespace bigquery { } namespace rowAccessPolicies { - /** - * Deletes a row access policy. - */ - type IDeleteParams = { - /** - * If set to true, it deletes the row access policy even if it's the last row access policy on the table and the deletion will widen the access rather narrowing it. - */ - force?: boolean; - }; - /** * Lists all row access policies on the specified table. 
*/ diff --git a/test/table.ts b/test/table.ts index 47d269f8..577a1ae3 100644 --- a/test/table.ts +++ b/test/table.ts @@ -2189,6 +2189,56 @@ describe('BigQuery/Table', () => { }); }); + it('should return selected fields after consecutive calls', done => { + const callSequence = [ + { + selectedFields: ['age'], + rows: [{f: [{v: 40}]}], + expected: [{age: 40}], + }, + { + selectedFields: ['name', 'address'], + rows: [{f: [{v: 'John'}, {v: '1234 Fake St, Springfield'}]}], + expected: [{name: 'John', address: '1234 Fake St, Springfield'}], + }, + { + selectedFields: ['age'], + rows: [{f: [{v: 50}]}], + expected: [{age: 50}], + }, + ]; + const schema = { + fields: [ + {name: 'name', type: 'string'}, + {name: 'age', type: 'INTEGER'}, + {name: 'address', type: 'string'}, + ], + }; + + table.metadata = {schema}; + + sandbox.restore(); + + for (const [i, call] of callSequence.entries()) { + table.request = ( + reqOpts: DecorateRequestOptions, + callback: Function + ) => { + callback(null, {rows: call.rows}); + }; + table.getRows( + {selectedFields: call.selectedFields.join(',')}, + (err: Error, rows: {}) => { + assert.ifError(err); + assert.deepStrictEqual(rows, call.expected); + if (i === callSequence.length - 1) { + done(); + } + } + ); + } + }); + it('should return selected fields from nested objects', done => { const selectedFields = 'objects.nested_object.nested_property_1'; const rows = [
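
The core of the fix in src/bigquery.ts is replacing in-place filtering of `schema.fields` with a filter over a shallow copy (`[...schema.fields]`). A minimal sketch of the failure mode — with deliberately simplified `Field`/`Schema` shapes and illustrative function names, not the library's actual types — shows why the old code poisoned every later call that reused the same cached schema:

```ts
type Field = {name: string};
type Schema = {fields?: Field[]};

// Pre-fix shape of the logic: filters the shared schema in place,
// permanently dropping the unselected columns from the cached object.
function mergeMutating(schema: Schema, selected: string[]): Field[] {
  schema.fields = schema.fields?.filter(f =>
    selected.includes(f.name.toLowerCase())
  );
  return schema.fields ?? [];
}

// Post-fix shape: filters a shallow copy, leaving the cached schema intact.
function mergeCopying(schema: Schema, selected: string[]): Field[] {
  const schemaFields = schema.fields ? [...schema.fields] : [];
  return schemaFields.filter(f => selected.includes(f.name.toLowerCase()));
}

const cached: Schema = {
  fields: [{name: 'name'}, {name: 'age'}, {name: 'address'}],
};

// Copying version: every call sees the full schema.
console.log(mergeCopying(cached, ['age']).map(f => f.name)); // ['age']
console.log(mergeCopying(cached, ['name', 'address']).map(f => f.name)); // ['name', 'address']

// Mutating version: the first call strips 'name' and 'address' out of the
// cached schema object, so the second call has nothing left to select.
console.log(mergeMutating(cached, ['age']).map(f => f.name)); // ['age']
console.log(mergeMutating(cached, ['name', 'address']).map(f => f.name)); // []
```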
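
At the API surface, the regression looked exactly like the scenario the new test exercises: consecutive `getRows` calls with different `selectedFields` against the same `Table` instance, whose `metadata.schema` is fetched once and then reused. A hypothetical promise-style call sequence (assuming a `table` that already points at a real table with `name`, `age`, and `address` columns):

```ts
const [first] = await table.getRows({selectedFields: 'age'});
// first -> e.g. [{age: 40}]

const [second] = await table.getRows({selectedFields: 'name,address'});
// Before this patch, `second` could come back with missing fields or fail
// outright: the first call had already filtered this.metadata.schema.fields
// down to just 'age'. With the copy in mergeSchemaWithRows_, each call
// filters its own view of the cached schema.
```

Parsing `selectedFields` once up front in `getRows` (alongside `wrapIntegers` and `parseJSON`) also avoids re-splitting the option string inside `onComplete` on every page of results.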