• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

supabase / cli / 12941917448

24 Jan 2025 02:14AM UTC coverage: 58.238% (-0.05%) from 58.292%
12941917448

Pull #3046

github

web-flow
Merge 322044b46 into 337aacd0d
Pull Request #3046: feat(cli): add repeatable migrations

25 of 48 new or added lines in 4 files covered. (52.08%)

6 existing lines in 2 files now uncovered.

7617 of 13079 relevant lines covered (58.24%)

202.0 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

86.49
/pkg/migration/file.go
1
package migration
2

3
import (
4
        "context"
5
        "crypto/sha256"
6
        "encoding/hex"
7
        "io"
8
        "io/fs"
9
        "path/filepath"
10
        "regexp"
11

12
        "github.com/go-errors/errors"
13
        "github.com/jackc/pgconn"
14
        "github.com/jackc/pgtype"
15
        "github.com/jackc/pgx/v4"
16
        "github.com/spf13/viper"
17
        "github.com/supabase/cli/pkg/parser"
18
)
19

20
// MigrationFile is a single SQL migration: its version identifier, a human
// readable name, and the parsed SQL statements to execute.
type MigrationFile struct {
        Version    string   // numeric prefix of the file name, or "r_<name>" for repeatable migrations
        Name       string   // descriptive part of the file name, without the .sql extension
        Statements []string // individual SQL statements split from the file contents
}
25

26
// migrateFilePattern matches migration file names: a numeric version (or the
// literal "r" for repeatable migrations), an underscore, a name, and ".sql".
var migrateFilePattern = regexp.MustCompile(`^([0-9]+|r)_(.*)\.sql$`)
27

28
func NewMigrationFromFile(path string, fsys fs.FS) (*MigrationFile, error) {
7✔
29
        lines, err := parseFile(path, fsys)
7✔
30
        if err != nil {
9✔
31
                return nil, err
2✔
32
        }
2✔
33
        file := MigrationFile{Statements: lines}
5✔
34
        // Parse version from file name
5✔
35
        filename := filepath.Base(path)
5✔
36
        matches := migrateFilePattern.FindStringSubmatch(filename)
5✔
37
        if len(matches) > 2 {
10✔
38
                file.Version = matches[1]
5✔
39
                file.Name = matches[2]
5✔
40
        }
5✔
41
        // Repeatable migration version => r_name
42
        if file.Version == "r" {
5✔
NEW
43
                file.Version += "_" + file.Name
×
NEW
44
        }
×
45
        return &file, nil
5✔
46
}
47

48
func parseFile(path string, fsys fs.FS) ([]string, error) {
9✔
49
        sql, err := fsys.Open(path)
9✔
50
        if err != nil {
11✔
51
                return nil, errors.Errorf("failed to open migration file: %w", err)
2✔
52
        }
2✔
53
        defer sql.Close()
7✔
54
        // Unless explicitly specified, Use file length as max buffer size
7✔
55
        if !viper.IsSet("SCANNER_BUFFER_SIZE") {
14✔
56
                if fi, err := sql.Stat(); err == nil {
14✔
57
                        if size := int(fi.Size()); size > parser.MaxScannerCapacity {
8✔
58
                                parser.MaxScannerCapacity = size
1✔
59
                        }
1✔
60
                }
61
        }
62
        return parser.SplitAndTrim(sql)
7✔
63
}
64

65
func NewMigrationFromReader(sql io.Reader) (*MigrationFile, error) {
1✔
66
        lines, err := parser.SplitAndTrim(sql)
1✔
67
        if err != nil {
2✔
68
                return nil, err
1✔
69
        }
1✔
70
        return &MigrationFile{Statements: lines}, nil
×
71
}
72

73
func (m *MigrationFile) ExecBatch(ctx context.Context, conn *pgx.Conn) error {
8✔
74
        // Batch migration commands, without using statement cache
8✔
75
        batch := &pgconn.Batch{}
8✔
76
        for _, line := range m.Statements {
17✔
77
                batch.ExecParams(line, nil, nil, nil, nil)
9✔
78
        }
9✔
79
        // Insert into migration history
80
        if len(m.Version) > 0 {
12✔
81
                if err := m.insertVersionSQL(conn, batch); err != nil {
4✔
82
                        return err
×
83
                }
×
84
        }
85
        // ExecBatch is implicitly transactional
86
        if result, err := conn.PgConn().ExecBatch(ctx, batch).ReadAll(); err != nil {
12✔
87
                // Defaults to printing the last statement on error
4✔
88
                stat := INSERT_MIGRATION_VERSION
4✔
89
                i := len(result)
4✔
90
                if i < len(m.Statements) {
8✔
91
                        stat = m.Statements[i]
4✔
92
                }
4✔
93
                return errors.Errorf("%w\nAt statement %d: %s", err, i, stat)
4✔
94
        }
95
        return nil
4✔
96
}
97

98
func (m *MigrationFile) insertVersionSQL(conn *pgx.Conn, batch *pgconn.Batch) error {
4✔
99
        value := pgtype.TextArray{}
4✔
100
        if err := value.Set(m.Statements); err != nil {
4✔
101
                return errors.Errorf("failed to set text array: %w", err)
×
102
        }
×
103
        ci := conn.ConnInfo()
4✔
104
        var err error
4✔
105
        var encoded []byte
4✔
106
        var valueFormat int16
4✔
107
        if conn.Config().PreferSimpleProtocol {
4✔
108
                encoded, err = value.EncodeText(ci, encoded)
×
109
                valueFormat = pgtype.TextFormatCode
×
110
        } else {
4✔
111
                encoded, err = value.EncodeBinary(ci, encoded)
4✔
112
                valueFormat = pgtype.BinaryFormatCode
4✔
113
        }
4✔
114
        if err != nil {
4✔
115
                return errors.Errorf("failed to encode binary: %w", err)
×
116
        }
×
117
        batch.ExecParams(
4✔
118
                INSERT_MIGRATION_VERSION,
4✔
119
                [][]byte{[]byte(m.Version), []byte(m.Name), encoded},
4✔
120
                []uint32{pgtype.TextOID, pgtype.TextOID, pgtype.TextArrayOID},
4✔
121
                []int16{pgtype.TextFormatCode, pgtype.TextFormatCode, valueFormat},
4✔
122
                nil,
4✔
123
        )
4✔
124
        return nil
4✔
125
}
126

127
// SeedFile tracks a seed SQL file by path together with the SHA-256 digest
// of its contents, used to detect changes between runs.
type SeedFile struct {
        Path  string
        Hash  string
        Dirty bool `db:"-"` // when set, data statements are skipped and only the hash is refreshed; not persisted (db:"-")
}
132

133
func NewSeedFile(path string, fsys fs.FS) (*SeedFile, error) {
5✔
134
        sql, err := fsys.Open(path)
5✔
135
        if err != nil {
6✔
136
                return nil, errors.Errorf("failed to open seed file: %w", err)
1✔
137
        }
1✔
138
        defer sql.Close()
4✔
139
        hash := sha256.New()
4✔
140
        if _, err := io.Copy(hash, sql); err != nil {
4✔
141
                return nil, errors.Errorf("failed to hash file: %w", err)
×
142
        }
×
143
        digest := hex.EncodeToString(hash.Sum(nil))
4✔
144
        return &SeedFile{Path: path, Hash: digest}, nil
4✔
145
}
146

147
func (m *SeedFile) ExecBatchWithCache(ctx context.Context, conn *pgx.Conn, fsys fs.FS) error {
2✔
148
        // Parse each file individually to reduce memory usage
2✔
149
        lines, err := parseFile(m.Path, fsys)
2✔
150
        if err != nil {
2✔
151
                return err
×
152
        }
×
153
        // Data statements don't mutate schemas, safe to use statement cache
154
        batch := pgx.Batch{}
2✔
155
        if !m.Dirty {
3✔
156
                for _, line := range lines {
2✔
157
                        batch.Queue(line)
1✔
158
                }
1✔
159
        }
160
        batch.Queue(UPSERT_SEED_FILE, m.Path, m.Hash)
2✔
161
        // No need to track version here because there are no schema changes
2✔
162
        if err := conn.SendBatch(ctx, &batch).Close(); err != nil {
3✔
163
                return errors.Errorf("failed to send batch: %w", err)
1✔
164
        }
1✔
165
        return nil
1✔
166
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc