feat(bigquery/storage/managedwriter): refactor AppendResponse (#6402)
* feat(bigquery/storage/managedwriter): refactor AppendResponse

The potential fields exposed within an AppendResponse have grown as the API has evolved. This PR refactors AppendResult to retain a reference to the response for servicing requests, which centralizes more of the response-processing logic within the AppendResult. We also introduce a new FullResponse() method on AppendResult, which returns the full AppendRowsResponse if present.
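As a rough sketch of the new surface (the method name and return type come from the description above; the exact blocking signature taking a context is an assumption), a caller might use it like this:

	// Append encoded rows, then wait on the result.
	result, err := managedStream.AppendRows(ctx, encoded)
	if err != nil {
		// TODO: Handle error.
	}
	// GetResult blocks until the append completes and yields the row offset, if any.
	offset, err := result.GetResult(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	// FullResponse returns the retained AppendRowsResponse for this append,
	// exposing fields beyond the offset (assumed to block like GetResult).
	fullResp, err := result.FullResponse(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	_ = offset
	_ = fullResp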
Showing 5 changed files with 165 additions and 86 deletions.
@@ -24,8 +24,7 @@ feature-rich successor to the classic BigQuery streaming interface, which is presented
in cloud.google.com/go/bigquery, and the tabledata.insertAll method if you're more familiar with the BigQuery v2 REST
methods.

-Creating a Client
+# Creating a Client

To start working with this package, create a client:
@@ -35,8 +34,7 @@ To start working with this package, create a client:

	// TODO: Handle error.
	}
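The hunk above shows only the tail of the client example; a minimal sketch of the elided portion, assuming the package's NewClient constructor and a placeholder project ID, is:

	ctx := context.Background()
	// Create a managedwriter client scoped to a project.
	client, err := managedwriter.NewClient(ctx, "my-project-id")
	if err != nil {
		// TODO: Handle error.
	}
	// Close the client when done to release the underlying connection.
	defer client.Close()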
-Defining the Protocol Buffer Schema
+# Defining the Protocol Buffer Schema

The write functionality of BigQuery Storage requires data to be sent using encoded
protocol buffer messages using proto2 wire format. As the protocol buffer is not
@@ -70,7 +68,7 @@ contains functionality to normalize the descriptor into a self-contained definition
The adapt subpackage also contains functionality for generating a DescriptorProto using
a BigQuery table's schema directly.
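As an illustrative sketch (assuming a compiled message type myprotopackage.MyCompiledMessage, as in the writing example further below, and the adapt subpackage's NormalizeDescriptor helper):

	// Derive a self-contained DescriptorProto from the compiled message's
	// descriptor, resolving nested and imported definitions.
	m := &myprotopackage.MyCompiledMessage{}
	descriptorProto, err := adapt.NormalizeDescriptor(m.ProtoReflect().Descriptor())
	if err != nil {
		// TODO: Handle error.
	}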
-Constructing a ManagedStream
+# Constructing a ManagedStream

The ManagedStream handles management of the underlying write connection to the BigQuery
Storage service. You can either create a write session explicitly and pass it in, or
@@ -102,7 +100,7 @@ In addition, NewManagedStream can create new streams implicitly:

	// TODO: Handle error.
	}
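A sketch of the elided construction, assuming the option helpers named in this package (WithDestinationTable, WithType, WithSchemaDescriptor), the descriptorProto from the earlier sketch, and a placeholder table name:

	// Create a managed stream against a destination table; the write stream
	// itself is created implicitly by the service.
	managedStream, err := client.NewManagedStream(ctx,
		managedwriter.WithDestinationTable("projects/PROJECT/datasets/DATASET/tables/TABLE"),
		managedwriter.WithType(managedwriter.PendingStream),
		managedwriter.WithSchemaDescriptor(descriptorProto),
	)
	if err != nil {
		// TODO: Handle error.
	}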
-Writing Data
+# Writing Data

Use the AppendRows function to write one or more serialized proto messages to a stream. You
can choose to specify an offset in the stream to handle de-duplication for user-created streams,
@@ -111,42 +109,40 @@ but a "default" stream neither accepts nor reports offsets.
AppendRows returns a future-like object that blocks until the write is successful or yields
an error.
	// Define a couple of messages.
	mesgs := []*myprotopackage.MyCompiledMessage{
		{
			UserName:        proto.String("johndoe"),
			EmailAddress:    proto.String("[email protected]"),
			FavoriteNumbers: []int64{1, 42, 12345},
		},
		{
			UserName:        proto.String("janesmith"),
			EmailAddress:    proto.String("[email protected]"),
			FavoriteNumbers: []int64{1, 3, 5, 7, 9},
		},
	}

	// Encode the messages into binary format.
	encoded := make([][]byte, len(mesgs))
	for k, v := range mesgs {
		b, err := proto.Marshal(v)
		if err != nil {
			// TODO: Handle error.
		}
		encoded[k] = b
	}

	// Send the rows to the service, and specify an offset for managing deduplication.
	result, err := managedStream.AppendRows(ctx, encoded, WithOffset(0))

	// Block until the write is complete and return the result.
	returnedOffset, err := result.GetResult(ctx)
	if err != nil {
		// TODO: Handle error.
	}
-Buffered Stream Management
+# Buffered Stream Management

For Buffered streams, users control when data is made visible in the destination table/stream
independently of when it is written. Use FlushRows on the ManagedStream to advance the flush
@@ -156,12 +152,11 @@ point ahead in the stream.

	// ahead to make the first 1000 rows available.
	flushOffset, err := managedStream.FlushRows(ctx, 1000)
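Tying the pieces together for a buffered stream (a sketch under the same assumptions as the earlier sketches; the flush offset here comes from the append result):

	// Append rows to a buffered stream, then make them visible by advancing
	// the flush point to the returned offset.
	result, err := managedStream.AppendRows(ctx, encoded)
	if err != nil {
		// TODO: Handle error.
	}
	offset, err := result.GetResult(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	flushOffset, err := managedStream.FlushRows(ctx, offset)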
-Pending Stream Management
+# Pending Stream Management

Pending streams allow users to commit data from multiple streams together once the streams
have been finalized, meaning they'll no longer allow further data writes.

	// First, finalize the stream we're writing into.
	totalRows, err := managedStream.Finalize(ctx)
	if err != nil {
@@ -175,6 +170,5 @@ have been finalized, meaning they'll no longer allow further data writes.

	// Using the client, we can commit data from multiple streams to the same
	// table atomically.
	resp, err := client.BatchCommitWriteStreams(ctx, req)
*/
package managedwriter
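The diff elides how the commit request is built between finalizing the stream and committing it; a sketch, assuming the BatchCommitWriteStreamsRequest type from the BigQuery Storage v1 protos (storagepb) and a placeholder table resource name as the parent, could look like:

	// Commit the finalized pending stream(s) into the destination table atomically.
	req := &storagepb.BatchCommitWriteStreamsRequest{
		Parent:       "projects/PROJECT/datasets/DATASET/tables/TABLE",
		WriteStreams: []string{managedStream.StreamName()},
	}
	resp, err := client.BatchCommitWriteStreams(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	_ = resp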