-
Notifications
You must be signed in to change notification settings - Fork 443
/
Copy pathappend_rows.php
143 lines (134 loc) · 5.84 KB
/
append_rows.php
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
<?php
/*
* Copyright 2023 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* GENERATED CODE WARNING
* This file was automatically generated - do not edit!
*/
require_once __DIR__ . '/../../../vendor/autoload.php';
// [START bigquerystorage_v1_generated_BigQueryWrite_AppendRows_sync]
use Google\ApiCore\ApiException;
use Google\ApiCore\BidiStream;
use Google\Cloud\BigQuery\Storage\V1\AppendRowsRequest;
use Google\Cloud\BigQuery\Storage\V1\AppendRowsResponse;
use Google\Cloud\BigQuery\Storage\V1\Client\BigQueryWriteClient;
/**
* Appends data to the given stream.
*
* If `offset` is specified, the `offset` is checked against the end of
* stream. The server returns `OUT_OF_RANGE` in `AppendRowsResponse` if an
* attempt is made to append to an offset beyond the current end of the stream
* or `ALREADY_EXISTS` if user provides an `offset` that has already been
* written to. User can retry with adjusted offset within the same RPC
* connection. If `offset` is not specified, append happens at the end of the
* stream.
*
* The response contains an optional offset at which the append
* happened. No offset information will be returned for appends to a
* default stream.
*
* Responses are received in the same order in which requests are sent.
* There will be one response for each successful inserted request. Responses
* may optionally embed error information if the originating AppendRequest was
* not successfully processed.
*
* The specifics of when successfully appended data is made visible to the
* table are governed by the type of stream:
*
* * For COMMITTED streams (which includes the default stream), data is
* visible immediately upon successful append.
*
* * For BUFFERED streams, data is made visible via a subsequent `FlushRows`
* rpc which advances a cursor to a newer offset in the stream.
*
* * For PENDING streams, data is not made visible until the stream itself is
* finalized (via the `FinalizeWriteStream` rpc), and the stream is explicitly
* committed via the `BatchCommitWriteStreams` rpc.
*
* @param string $formattedWriteStream The write_stream identifies the append operation. It must be
* provided in the following scenarios:
*
* * In the first request to an AppendRows connection.
*
* * In all subsequent requests to an AppendRows connection, if you use the
* same connection to write to multiple tables or change the input schema for
* default streams.
*
* For explicitly created write streams, the format is:
*
* * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}`
*
* For the special default stream, the format is:
*
* * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`.
*
* An example of a possible sequence of requests with write_stream fields
* within a single connection:
*
 * * r1: {write_stream: stream_name_1}
 *
 * * r2: {write_stream: (omitted)}
 *
 * * r3: {write_stream: (omitted)}
 *
 * * r4: {write_stream: stream_name_2}
 *
 * * r5: {write_stream: stream_name_2}
*
* The destination changed in request_4, so the write_stream field must be
* populated in all subsequent requests in this stream. Please see
* {@see BigQueryWriteClient::writeStreamName()} for help formatting this field.
*/
/**
 * Streams append requests to the given BigQuery write stream and prints
 * each response (or the failure message) to stdout.
 *
 * @param string $formattedWriteStream Fully-qualified write stream name, as
 *        produced by {@see BigQueryWriteClient::writeStreamName()}.
 */
function append_rows_sample(string $formattedWriteStream): void
{
    // Instantiate the API client.
    $client = new BigQueryWriteClient();

    // Build a single append request targeting the supplied write stream.
    $appendRequest = (new AppendRowsRequest())
        ->setWriteStream($formattedWriteStream);

    try {
        // Open the bidirectional stream and send our request batch.
        /** @var BidiStream $stream */
        $stream = $client->appendRows();
        $stream->writeAll([$appendRequest]);

        // Close the write side, then drain every server response in order.
        /** @var AppendRowsResponse $element */
        foreach ($stream->closeWriteAndReadAll() as $element) {
            printf('Element data: %s' . PHP_EOL, $element->serializeToJsonString());
        }
    } catch (ApiException $ex) {
        // Surface any transport/API failure rather than letting it propagate.
        printf('Call failed with message: %s' . PHP_EOL, $ex->getMessage());
    }
}
/**
* Helper to execute the sample.
*
* This sample has been automatically generated and should be regarded as a code
* template only. It will require modifications to work:
* - It may require correct/in-range values for request initialization.
* - It may require specifying regional endpoints when creating the service client,
* please see the apiEndpoint client configuration option for more details.
*/
/**
 * Entry point that runs the sample with placeholder resource identifiers.
 *
 * This sample has been automatically generated and should be regarded as a code
 * template only. It will require modifications to work:
 * - It may require correct/in-range values for request initialization.
 * - It may require specifying regional endpoints when creating the service client,
 *   please see the apiEndpoint client configuration option for more details.
 */
function callSample(): void
{
    // Format the placeholder components into a full write-stream resource name.
    $stream = BigQueryWriteClient::writeStreamName('[PROJECT]', '[DATASET]', '[TABLE]', '[STREAM]');

    append_rows_sample($stream);
}
// [END bigquerystorage_v1_generated_BigQueryWrite_AppendRows_sync]