
Add a pgroll pull subcommand (#463)

Add a new `pgroll pull` command that pulls the complete migration
history for a schema from the `migrations` table in the target database
and dumps the migrations to disk.

## Example

Given the migration history in `pgroll.migrations` after applying all
`example/` migrations in this repo:

```
+--------+-----------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------->
| schema | name                                    | migration                                                                                                                                                                                                                                              >
|--------+-----------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------->
| public | 01_create_tables                        | {"name": "01_create_tables", "operations": [{"create_table": {"name": "customers", "columns": [{"pk": true, "name": "id", "type": "integer"}, {"name": "name", "type": "varchar(255)", "unique": true}, {"name": "credit_card", "type": "text", "nullab>
| public | 02_create_another_table                 | {"name": "02_create_another_table", "operations": [{"create_table": {"name": "products", "columns": [{"pk": true, "name": "id", "type": "serial"}, {"name": "name", "type": "varchar(255)", "unique": true}, {"name": "price", "type": "decimal(10,2)"}>
| public | 03_add_column_to_products               | {"name": "03_add_column_to_products", "operations": [{"add_column": {"up": "UPPER(name)", "table": "products", "column": {"name": "description", "type": "varchar(255)", "nullable": true}}}, {"add_column": {"table": "products", "column": {"name": ">
| public | 04_rename_table                         | {"name": "04_rename_table", "operations": [{"rename_table": {"to": "clients", "from": "customers"}}]}                                                                                                                                                  >
| public | 05_sql                                  | {"name": "05_sql", "operations": [{"sql": {"up": "CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT)", "down": "DROP TABLE users"}}]}                                                                                                              >
| public | 06_add_column_to_sql_table              | {"name": "06_add_column_to_sql_table", "operations": [{"add_column": {"up": "UPPER(name)", "table": "users", "column": {"name": "description", "type": "varchar(255)", "nullable": true}}}]}                                                           >
...
```
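
For reference, this listing corresponds to rows in the `pgroll.migrations` state table. A minimal sketch of reading those rows directly with `database/sql` might look like the following; the connection string is an assumption and only the name and timestamp columns are selected:

```go
package main

import (
	"database/sql"
	"fmt"
	"log"
	"time"

	_ "github.com/lib/pq"
)

func main() {
	// Assumed connection string; adjust host, credentials and database to your setup.
	db, err := sql.Open("postgres", "postgres://postgres:postgres@localhost:5432/postgres?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Ascending created_at is the same ordering the pull command relies on.
	rows, err := db.Query(
		`SELECT name, created_at FROM pgroll.migrations WHERE schema = $1 ORDER BY created_at`,
		"public")
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	for rows.Next() {
		var name string
		var createdAt time.Time
		if err := rows.Scan(&name, &createdAt); err != nil {
			log.Fatal(err)
		}
		fmt.Println(createdAt.Format(time.RFC3339), name)
	}
	if err := rows.Err(); err != nil {
		log.Fatal(err)
	}
}
```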

Run:

```bash
$ pgroll pull migrations/
```

This will pull all migrations from the `pgroll.migrations` table into
the `migrations/` directory:

```
$ ls migrations/

01_create_tables.json
02_create_another_table.json
03_add_column_to_products.json
04_rename_table.json
05_sql.json
06_add_column_to_sql_table.json
...
```

The optional `--with-prefixes` flag prefixes each migration name with
its position in the schema history:

```bash
$ pgroll pull --with-prefixes migrations/
```

This produces the following files:

```
$ ls migrations/

0001_01_create_tables.json
0002_02_create_another_table.json
0003_03_add_column_to_products.json
0004_04_rename_table.json
0005_05_sql.json
0006_06_add_column_to_sql_table.json
...
```

The `--with-prefixes` flag ensures that, when the files are sorted
lexicographically, they appear in the order in which the migrations were applied.
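
As a minimal illustration of that guarantee, consider two hypothetical migrations applied in the order `create_tables` then `add_column`; without prefixes a lexicographic sort reverses them, while the zero-padded prefixes preserve the application order:

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	// Hypothetical filenames, applied in this order: create_tables first, then add_column.
	unprefixed := []string{"create_tables.json", "add_column.json"}
	prefixed := []string{"0001_create_tables.json", "0002_add_column.json"}

	sort.Strings(unprefixed)
	sort.Strings(prefixed)

	// Without prefixes the lexicographic order no longer matches the application order.
	fmt.Println(unprefixed) // [add_column.json create_tables.json]

	// With zero-padded prefixes, sorting the filenames reproduces the application order.
	fmt.Println(prefixed) // [0001_create_tables.json 0002_add_column.json]
}
```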
andrew-farries authored Nov 14, 2024
1 parent a5fb72e commit 30d131e
Showing 5 changed files with 263 additions and 0 deletions.
56 changes: 56 additions & 0 deletions cmd/pull.go
@@ -0,0 +1,56 @@
// SPDX-License-Identifier: Apache-2.0

package cmd

import (
	"fmt"

	"github.com/xataio/pgroll/cmd/flags"
	"github.com/xataio/pgroll/pkg/state"

	"github.com/spf13/cobra"
)

func pullCmd() *cobra.Command {
	opts := map[string]string{
		"p": "prefix each migration filename with its position in the schema history",
	}
	var withPrefixes bool

	pullCmd := &cobra.Command{
		Use:   "pull <target directory>",
		Short: "pull migration history from the target database and write it to disk",
		Args:  cobra.ExactArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			ctx := cmd.Context()
			targetDir := args[0]

			state, err := state.New(ctx, flags.PostgresURL(), flags.StateSchema())
			if err != nil {
				return err
			}
			defer state.Close()

			migs, err := state.SchemaHistory(ctx, flags.Schema())
			if err != nil {
				return fmt.Errorf("failed to read schema history: %w", err)
			}

			for i, mig := range migs {
				prefix := ""
				if withPrefixes {
					prefix = fmt.Sprintf("%04d", i+1) + "_"
				}
				err := mig.WriteToFile(targetDir, prefix)
				if err != nil {
					return fmt.Errorf("failed to write migration %q: %w", mig.Migration.Name, err)
				}
			}
			return nil
		},
	}

	pullCmd.Flags().BoolVarP(&withPrefixes, "with-prefixes", "p", false, opts["p"])

	return pullCmd
}
1 change: 1 addition & 0 deletions cmd/root.go
@@ -77,6 +77,7 @@ func Execute() error {
	rootCmd.AddCommand(initCmd)
	rootCmd.AddCommand(statusCmd)
	rootCmd.AddCommand(bootstrapCmd)
	rootCmd.AddCommand(pullCmd())

	return rootCmd.Execute()
}
43 changes: 43 additions & 0 deletions docs/README.md
@@ -13,6 +13,7 @@
* [complete](#complete)
* [rollback](#rollback)
* [status](#status)
* [pull](#pull)
* [Operations reference](#operations-reference)
* [Add column](#add-column)
* [Alter column](#alter-column)
@@ -536,6 +537,7 @@ The `pgroll` CLI offers the following subcommands:
* [complete](#complete)
* [rollback](#rollback)
* [status](#status)
* [pull](#pull)

The `pgroll` CLI has the following top-level flags:
* `--postgres-url`: The URL of the postgres instance against which migrations will be run.
@@ -660,6 +662,47 @@ $ pgroll status --schema schema_a
}
```

### Pull

`pgroll pull` pulls the complete schema history of applied migrations from the target database and writes the migrations to disk.

Assuming that all [example migrations](https://github.com/xataio/pgroll/tree/main/examples) have been applied, running:

```
$ pgroll pull migrations/
```

will write the complete schema history as `.json` files to the `migrations/` directory:

```
$ ls migrations/
01_create_tables.json
02_create_another_table.json
03_add_column_to_products.json
04_rename_table.json
05_sql.json
06_add_column_to_sql_table.json
...
```

The command takes an optional `--with-prefixes` flag which prefixes each filename with its position in the schema history:

```
$ ls migrations/
0001_01_create_tables.json
0002_02_create_another_table.json
0003_03_add_column_to_products.json
0004_04_rename_table.json
0005_05_sql.json
0006_06_add_column_to_sql_table.json
...
```

The `--with-prefixes` flag ensures that, when the files are sorted lexicographically, they appear in the order in which the migrations were applied.

If the directory specified as the required argument to `pgroll pull` does not exist, `pgroll pull` will create it.

## Operations reference

`pgroll` migrations are specified as JSON files. All migrations follow the same basic structure:
84 changes: 84 additions & 0 deletions pkg/state/history.go
@@ -0,0 +1,84 @@
// SPDX-License-Identifier: Apache-2.0

package state

import (
	"context"
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
	"time"

	"github.com/lib/pq"
	"github.com/xataio/pgroll/pkg/migrations"
)

// Migration represents a single migration in the migration history
// of a schema
type Migration struct {
	Migration migrations.Migration
	CreatedAt time.Time
}

// SchemaHistory returns all migrations applied to a schema in ascending
// timestamp order
func (s *State) SchemaHistory(ctx context.Context, schema string) ([]Migration, error) {
	rows, err := s.pgConn.QueryContext(ctx,
		fmt.Sprintf(`SELECT name, migration, created_at
		FROM %s.migrations
		WHERE schema=$1 ORDER BY created_at`,
			pq.QuoteIdentifier(s.schema)), schema)
	if err != nil {
		return nil, err
	}

	defer rows.Close()

	var entries []Migration
	for rows.Next() {
		var name, rawMigration string
		var createdAt time.Time

		err = rows.Scan(&name, &rawMigration, &createdAt)
		if err != nil {
			return nil, err
		}

		var mig migrations.Migration
		err = json.Unmarshal([]byte(rawMigration), &mig)
		if err != nil {
			return nil, err
		}

		entries = append(entries, Migration{
			Migration: mig,
			CreatedAt: createdAt,
		})
	}

	return entries, nil
}

// WriteToFile writes the migration to a file in `targetDir`, prefixing the
// filename with `prefix`.
func (m *Migration) WriteToFile(targetDir, prefix string) error {
	err := os.MkdirAll(targetDir, 0o755)
	if err != nil {
		return err
	}

	fileName := fmt.Sprintf("%s%s.json", prefix, m.Migration.Name)
	filePath := filepath.Join(targetDir, fileName)

	file, err := os.Create(filePath)
	if err != nil {
		return err
	}
	defer file.Close()

	encoder := json.NewEncoder(file)
	encoder.SetIndent("", " ")

	return encoder.Encode(m.Migration)
}
79 changes: 79 additions & 0 deletions pkg/state/history_test.go
@@ -0,0 +1,79 @@
// SPDX-License-Identifier: Apache-2.0

package state_test

import (
	"context"
	"database/sql"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/xataio/pgroll/internal/testutils"
	"github.com/xataio/pgroll/pkg/migrations"
	"github.com/xataio/pgroll/pkg/state"
)

func TestSchemaHistoryReturnsFullSchemaHistory(t *testing.T) {
	t.Parallel()

	testutils.WithStateAndConnectionToContainer(t, func(state *state.State, db *sql.DB) {
		ctx := context.Background()
		migs := []migrations.Migration{
			{
				Name: "01_add_table",
				Operations: migrations.Operations{
					&migrations.OpCreateTable{
						Name: "users",
						Columns: []migrations.Column{
							{
								Name: "id",
								Type: "serial",
								Pk:   ptr(true),
							},
							{
								Name:     "username",
								Type:     "text",
								Nullable: ptr(false),
							},
						},
					},
				},
			},
			{
				Name: "02_set_nullable",
				Operations: migrations.Operations{
					&migrations.OpAlterColumn{
						Table:    "users",
						Column:   "username",
						Nullable: ptr(false),
						Up:       "username",
					},
				},
			},
		}

		// Start and complete both migrations
		for _, mig := range migs {
			_, err := state.Start(ctx, "public", &mig)
			require.NoError(t, err)
			err = state.Complete(ctx, "public", mig.Name)
			require.NoError(t, err)
		}

		// Ensure that the schema history is correct
		res, err := state.SchemaHistory(ctx, "public")
		require.NoError(t, err)

		assert.Equal(t, 2, len(res))
		assert.Equal(t, migs[0].Name, res[0].Migration.Name)
		assert.Equal(t, migs[1].Name, res[1].Migration.Name)

		assert.Equal(t, migs[0].Operations, res[0].Migration.Operations)
		assert.Equal(t, migs[1].Operations, res[1].Migration.Operations)
	})
}

func ptr[T any](v T) *T {
	return &v
}
