From 201c44e74c3f7f5dae678fd68262afa2e8fec1cc Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Fri, 5 Apr 2019 03:35:34 +0200 Subject: [PATCH] bump github.com/lib/pq v1.9.0 full diff: https://github.com/lib/pq/compare/0dad96c0b94f8dee039aa40467f767467392a0af...v1.9.0 Signed-off-by: Sebastiaan van Stijn --- go.mod | 2 +- go.sum | 4 +- vendor/github.com/lib/pq/.gitignore | 2 + vendor/github.com/lib/pq/.travis.sh | 73 ++ vendor/github.com/lib/pq/.travis.yml | 83 +- vendor/github.com/lib/pq/CONTRIBUTING.md | 29 - vendor/github.com/lib/pq/README.md | 87 +- vendor/github.com/lib/pq/TESTS.md | 33 + vendor/github.com/lib/pq/array.go | 895 ++++++++++++++++ vendor/github.com/lib/pq/buf.go | 2 +- vendor/github.com/lib/pq/conn.go | 1041 ++++++++++++------- vendor/github.com/lib/pq/conn_go18.go | 165 +++ vendor/github.com/lib/pq/connector.go | 115 ++ vendor/github.com/lib/pq/copy.go | 65 +- vendor/github.com/lib/pq/doc.go | 78 +- vendor/github.com/lib/pq/encode.go | 226 ++-- vendor/github.com/lib/pq/error.go | 18 +- vendor/github.com/lib/pq/go.mod | 3 + vendor/github.com/lib/pq/krb.go | 27 + vendor/github.com/lib/pq/notice.go | 71 ++ vendor/github.com/lib/pq/notify.go | 150 ++- vendor/github.com/lib/pq/oid/types.go | 184 +++- vendor/github.com/lib/pq/rows.go | 93 ++ vendor/github.com/lib/pq/scram/scram.go | 264 +++++ vendor/github.com/lib/pq/ssl.go | 175 ++++ vendor/github.com/lib/pq/ssl_permissions.go | 20 + vendor/github.com/lib/pq/ssl_windows.go | 9 + vendor/github.com/lib/pq/url.go | 8 +- vendor/github.com/lib/pq/user_posix.go | 2 +- vendor/github.com/lib/pq/uuid.go | 23 + vendor/modules.txt | 3 +- 31 files changed, 3262 insertions(+), 688 deletions(-) create mode 100644 vendor/github.com/lib/pq/.travis.sh delete mode 100644 vendor/github.com/lib/pq/CONTRIBUTING.md create mode 100644 vendor/github.com/lib/pq/TESTS.md create mode 100644 vendor/github.com/lib/pq/array.go create mode 100644 vendor/github.com/lib/pq/conn_go18.go create mode 100644 vendor/github.com/lib/pq/connector.go create mode 100644 vendor/github.com/lib/pq/go.mod create mode 100644 vendor/github.com/lib/pq/krb.go create mode 100644 vendor/github.com/lib/pq/notice.go create mode 100644 vendor/github.com/lib/pq/rows.go create mode 100644 vendor/github.com/lib/pq/scram/scram.go create mode 100644 vendor/github.com/lib/pq/ssl.go create mode 100644 vendor/github.com/lib/pq/ssl_permissions.go create mode 100644 vendor/github.com/lib/pq/ssl_windows.go create mode 100644 vendor/github.com/lib/pq/uuid.go diff --git a/go.mod b/go.mod index 0f682d932..ce0646ac1 100644 --- a/go.mod +++ b/go.mod @@ -30,7 +30,7 @@ require ( github.com/jinzhu/now v1.1.1 // indirect github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8 // indirect github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect - github.com/lib/pq v0.0.0-20150723085316-0dad96c0b94f + github.com/lib/pq v1.9.0 github.com/magiconair/properties v1.5.3 // indirect github.com/mattn/go-sqlite3 v1.6.0 github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect diff --git a/go.sum b/go.sum index 8b768f79b..73ff43def 100644 --- a/go.sum +++ b/go.sum @@ -74,8 +74,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/lib/pq v0.0.0-20150723085316-0dad96c0b94f 
h1:I1iYfgQavGa2tgdgKn+2Qg1yQhHEETvh/mNSxG3x5c0= -github.com/lib/pq v0.0.0-20150723085316-0dad96c0b94f/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.9.0 h1:L8nSXQQzAYByakOFMTwpjRoHsMJklur4Gi59b6VivR8= +github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/magiconair/properties v1.5.3 h1:C8fxWnhYyME3n0klPOhVM7PtYUB3eV1W3DeFmN3j53Y= github.com/magiconair/properties v1.5.3/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mattn/go-sqlite3 v1.6.0 h1:TDwTWbeII+88Qy55nWlof0DclgAtI4LqGujkYMzmQII= diff --git a/vendor/github.com/lib/pq/.gitignore b/vendor/github.com/lib/pq/.gitignore index 0f1d00e11..3243952a4 100644 --- a/vendor/github.com/lib/pq/.gitignore +++ b/vendor/github.com/lib/pq/.gitignore @@ -2,3 +2,5 @@ *.test *~ *.swp +.idea +.vscode \ No newline at end of file diff --git a/vendor/github.com/lib/pq/.travis.sh b/vendor/github.com/lib/pq/.travis.sh new file mode 100644 index 000000000..ebf447030 --- /dev/null +++ b/vendor/github.com/lib/pq/.travis.sh @@ -0,0 +1,73 @@ +#!/bin/bash + +set -eu + +client_configure() { + sudo chmod 600 $PQSSLCERTTEST_PATH/postgresql.key +} + +pgdg_repository() { + local sourcelist='sources.list.d/postgresql.list' + + curl -sS 'https://www.postgresql.org/media/keys/ACCC4CF8.asc' | sudo apt-key add - + echo deb http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main $PGVERSION | sudo tee "/etc/apt/$sourcelist" + sudo apt-get -o Dir::Etc::sourcelist="$sourcelist" -o Dir::Etc::sourceparts='-' -o APT::Get::List-Cleanup='0' update +} + +postgresql_configure() { + sudo tee /etc/postgresql/$PGVERSION/main/pg_hba.conf > /dev/null <<-config + local all all trust + hostnossl all pqgossltest 127.0.0.1/32 reject + hostnossl all pqgosslcert 127.0.0.1/32 reject + hostssl all pqgossltest 127.0.0.1/32 trust + hostssl all pqgosslcert 127.0.0.1/32 cert + host all all 127.0.0.1/32 trust + hostnossl all pqgossltest ::1/128 reject + hostnossl all pqgosslcert ::1/128 reject + hostssl all pqgossltest ::1/128 trust + hostssl all pqgosslcert ::1/128 cert + host all all ::1/128 trust + config + + xargs sudo install -o postgres -g postgres -m 600 -t /var/lib/postgresql/$PGVERSION/main/ <<-certificates + certs/root.crt + certs/server.crt + certs/server.key + certificates + + sort -VCu <<-versions || + $PGVERSION + 9.2 + versions + sudo tee -a /etc/postgresql/$PGVERSION/main/postgresql.conf > /dev/null <<-config + ssl_ca_file = 'root.crt' + ssl_cert_file = 'server.crt' + ssl_key_file = 'server.key' + config + + echo 127.0.0.1 postgres | sudo tee -a /etc/hosts > /dev/null + + sudo service postgresql restart +} + +postgresql_install() { + xargs sudo apt-get -y -o Dpkg::Options::='--force-confdef' -o Dpkg::Options::='--force-confnew' install <<-packages + postgresql-$PGVERSION + postgresql-server-dev-$PGVERSION + postgresql-contrib-$PGVERSION + packages +} + +postgresql_uninstall() { + sudo service postgresql stop + xargs sudo apt-get -y --purge remove <<-packages + libpq-dev + libpq5 + postgresql + postgresql-client-common + postgresql-common + packages + sudo rm -rf /var/lib/postgresql +} + +$1 diff --git a/vendor/github.com/lib/pq/.travis.yml b/vendor/github.com/lib/pq/.travis.yml index 9bf683730..f378207f2 100644 --- a/vendor/github.com/lib/pq/.travis.yml +++ b/vendor/github.com/lib/pq/.travis.yml @@ -1,43 +1,11 @@ language: go go: - - 1.1 - - 1.2 - - 1.3 - - 1.4 - - tip + - 1.14.x + - 1.15.x + - master -before_install: - - psql --version - - sudo /etc/init.d/postgresql stop - - sudo apt-get 
-y --purge remove postgresql libpq-dev libpq5 postgresql-client-common postgresql-common - - sudo rm -rf /var/lib/postgresql - - wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add - - - sudo sh -c "echo deb http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main $PGVERSION >> /etc/apt/sources.list.d/postgresql.list" - - sudo apt-get update -qq - - sudo apt-get -y -o Dpkg::Options::=--force-confdef -o Dpkg::Options::="--force-confnew" install postgresql-$PGVERSION postgresql-server-dev-$PGVERSION postgresql-contrib-$PGVERSION - - sudo chmod 777 /etc/postgresql/$PGVERSION/main/pg_hba.conf - - echo "local all postgres trust" > /etc/postgresql/$PGVERSION/main/pg_hba.conf - - echo "local all all trust" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf - - echo "hostnossl all pqgossltest 127.0.0.1/32 reject" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf - - echo "hostnossl all pqgosslcert 127.0.0.1/32 reject" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf - - echo "hostssl all pqgossltest 127.0.0.1/32 trust" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf - - echo "hostssl all pqgosslcert 127.0.0.1/32 cert" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf - - echo "host all all 127.0.0.1/32 trust" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf - - echo "hostnossl all pqgossltest ::1/128 reject" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf - - echo "hostnossl all pqgosslcert ::1/128 reject" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf - - echo "hostssl all pqgossltest ::1/128 trust" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf - - echo "hostssl all pqgosslcert ::1/128 cert" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf - - echo "host all all ::1/128 trust" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf - - sudo install -o postgres -g postgres -m 600 -t /var/lib/postgresql/$PGVERSION/main/ certs/server.key certs/server.crt certs/root.crt - - sudo bash -c "[[ '${PGVERSION}' < '9.2' ]] || (echo \"ssl_cert_file = 'server.crt'\" >> /etc/postgresql/$PGVERSION/main/postgresql.conf)" - - sudo bash -c "[[ '${PGVERSION}' < '9.2' ]] || (echo \"ssl_key_file = 'server.key'\" >> /etc/postgresql/$PGVERSION/main/postgresql.conf)" - - sudo bash -c "[[ '${PGVERSION}' < '9.2' ]] || (echo \"ssl_ca_file = 'root.crt'\" >> /etc/postgresql/$PGVERSION/main/postgresql.conf)" - - sudo sh -c "echo 127.0.0.1 postgres >> /etc/hosts" - - sudo ls -l /var/lib/postgresql/$PGVERSION/main/ - - sudo cat /etc/postgresql/$PGVERSION/main/postgresql.conf - - sudo chmod 600 $PQSSLCERTTEST_PATH/postgresql.key - - sudo /etc/init.d/postgresql restart +sudo: true env: global: @@ -45,24 +13,33 @@ env: - PQGOSSLTESTS=1 - PQSSLCERTTEST_PATH=$PWD/certs - PGHOST=127.0.0.1 + - GODEBUG=x509ignoreCN=0 matrix: - - PGVERSION=9.4 PQTEST_BINARY_PARAMETERS=yes - - PGVERSION=9.3 PQTEST_BINARY_PARAMETERS=yes - - PGVERSION=9.2 PQTEST_BINARY_PARAMETERS=yes - - PGVERSION=9.1 PQTEST_BINARY_PARAMETERS=yes - - PGVERSION=9.0 PQTEST_BINARY_PARAMETERS=yes - - PGVERSION=8.4 PQTEST_BINARY_PARAMETERS=yes - - PGVERSION=9.4 PQTEST_BINARY_PARAMETERS=no - - PGVERSION=9.3 PQTEST_BINARY_PARAMETERS=no - - PGVERSION=9.2 PQTEST_BINARY_PARAMETERS=no - - PGVERSION=9.1 PQTEST_BINARY_PARAMETERS=no - - PGVERSION=9.0 PQTEST_BINARY_PARAMETERS=no - - PGVERSION=8.4 PQTEST_BINARY_PARAMETERS=no + - PGVERSION=10 + - PGVERSION=9.6 + - PGVERSION=9.5 + - PGVERSION=9.4 -script: - - go test -v ./... 
+before_install: + - ./.travis.sh postgresql_uninstall + - ./.travis.sh pgdg_repository + - ./.travis.sh postgresql_install + - ./.travis.sh postgresql_configure + - ./.travis.sh client_configure + - go get golang.org/x/tools/cmd/goimports + - go get golang.org/x/lint/golint + - GO111MODULE=on go get honnef.co/go/tools/cmd/staticcheck@2020.1.3 before_script: - - psql -c 'create database pqgotest' -U postgres - - psql -c 'create user pqgossltest' -U postgres - - psql -c 'create user pqgosslcert' -U postgres + - createdb pqgotest + - createuser -DRS pqgossltest + - createuser -DRS pqgosslcert + +script: + - > + goimports -d -e $(find -name '*.go') | awk '{ print } END { exit NR == 0 ? 0 : 1 }' + - go vet ./... + - staticcheck -go 1.13 ./... + - golint ./... + - PQTEST_BINARY_PARAMETERS=no go test -race -v ./... + - PQTEST_BINARY_PARAMETERS=yes go test -race -v ./... diff --git a/vendor/github.com/lib/pq/CONTRIBUTING.md b/vendor/github.com/lib/pq/CONTRIBUTING.md deleted file mode 100644 index 84c937f15..000000000 --- a/vendor/github.com/lib/pq/CONTRIBUTING.md +++ /dev/null @@ -1,29 +0,0 @@ -## Contributing to pq - -`pq` has a backlog of pull requests, but contributions are still very -much welcome. You can help with patch review, submitting bug reports, -or adding new functionality. There is no formal style guide, but -please conform to the style of existing code and general Go formatting -conventions when submitting patches. - -### Patch review - -Help review existing open pull requests by commenting on the code or -proposed functionality. - -### Bug reports - -We appreciate any bug reports, but especially ones with self-contained -(doesn't depend on code outside of pq), minimal (can't be simplified -further) test cases. It's especially helpful if you can submit a pull -request with just the failing test case (you'll probably want to -pattern it after the tests in -[conn_test.go](https://github.com/lib/pq/blob/master/conn_test.go). - -### New functionality - -There are a number of pending patches for new functionality, so -additional feature patches will take a while to merge. Still, patches -are generally reviewed based on usefulness and complexity in addition -to time-in-queue, so if you have a knockout idea, take a shot. Feel -free to open an issue discussion your proposed patch beforehand. diff --git a/vendor/github.com/lib/pq/README.md b/vendor/github.com/lib/pq/README.md index 358d644f6..c972a86a5 100644 --- a/vendor/github.com/lib/pq/README.md +++ b/vendor/github.com/lib/pq/README.md @@ -1,31 +1,11 @@ # pq - A pure Go postgres driver for Go's database/sql package -[![Build Status](https://travis-ci.org/lib/pq.png?branch=master)](https://travis-ci.org/lib/pq) +[![GoDoc](https://godoc.org/github.com/lib/pq?status.svg)](https://pkg.go.dev/github.com/lib/pq?tab=doc) ## Install go get github.com/lib/pq -## Docs - -For detailed documentation and basic usage examples, please see the package -documentation at . - -## Tests - -`go test` is used for testing. A running PostgreSQL server is -required, with the ability to log in. The default database to connect -to test with is "pqgotest," but it can be overridden using environment -variables. - -Example: - - PGHOST=/var/run/postgresql go test github.com/lib/pq - -Optionally, a benchmark suite can be run as part of the tests: - - PGHOST=/var/run/postgresql go test -bench . 
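For reference, a minimal usage sketch of the driver through database/sql (connection parameters here are placeholders, not values from this patch; the blank import registers the "postgres" driver name, as seen in conn.go further down):

```
package main

import (
	"database/sql"
	"fmt"

	_ "github.com/lib/pq" // registers the "postgres" driver via sql.Register
)

func main() {
	// Placeholder connection string; adjust host, user, dbname and sslmode for your server.
	db, err := sql.Open("postgres", "host=localhost user=pqgotest dbname=pqgotest sslmode=disable")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	var version string
	if err := db.QueryRow("SELECT version()").Scan(&version); err != nil {
		panic(err)
	}
	fmt.Println(version)
}
```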
- ## Features * SSL @@ -38,66 +18,13 @@ Optionally, a benchmark suite can be run as part of the tests: * Many libpq compatible environment variables * Unix socket support * Notifications: `LISTEN`/`NOTIFY` +* pgpass support +* GSS (Kerberos) auth -## Future / Things you can help with - -* Better COPY FROM / COPY TO (see discussion in #181) +## Tests -## Thank you (alphabetical) +`go test` is used for testing. See [TESTS.md](TESTS.md) for more details. -Some of these contributors are from the original library `bmizerany/pq.go` whose -code still exists in here. +## Status -* Andy Balholm (andybalholm) -* Ben Berkert (benburkert) -* Benjamin Heatwole (bheatwole) -* Bill Mill (llimllib) -* Bjørn Madsen (aeons) -* Blake Gentry (bgentry) -* Brad Fitzpatrick (bradfitz) -* Charlie Melbye (cmelbye) -* Chris Bandy (cbandy) -* Chris Gilling (cgilling) -* Chris Walsh (cwds) -* Dan Sosedoff (sosedoff) -* Daniel Farina (fdr) -* Eric Chlebek (echlebek) -* Eric Garrido (minusnine) -* Eric Urban (hydrogen18) -* Everyone at The Go Team -* Evan Shaw (edsrzf) -* Ewan Chou (coocood) -* Federico Romero (federomero) -* Fumin (fumin) -* Gary Burd (garyburd) -* Heroku (heroku) -* James Pozdena (jpoz) -* Jason McVetta (jmcvetta) -* Jeremy Jay (pbnjay) -* Joakim Sernbrant (serbaut) -* John Gallagher (jgallagher) -* Jonathan Rudenberg (titanous) -* Joël Stemmer (jstemmer) -* Kamil Kisiel (kisielk) -* Kelly Dunn (kellydunn) -* Keith Rarick (kr) -* Kir Shatrov (kirs) -* Lann Martin (lann) -* Maciek Sakrejda (deafbybeheading) -* Marc Brinkmann (mbr) -* Marko Tiikkaja (johto) -* Matt Newberry (MattNewberry) -* Matt Robenolt (mattrobenolt) -* Martin Olsen (martinolsen) -* Mike Lewis (mikelikespie) -* Nicolas Patry (Narsil) -* Oliver Tonnhofer (olt) -* Patrick Hayes (phayes) -* Paul Hammond (paulhammond) -* Ryan Smith (ryandotsmith) -* Samuel Stauffer (samuel) -* Timothée Peignier (cyberdelia) -* Travis Cline (tmc) -* TruongSinh Tran-Nguyen (truongsinh) -* Yaismel Miranda (ympons) -* notedit (notedit) +This package is effectively in maintenance mode and is not actively developed. Small patches and features are only rarely reviewed and merged. We recommend using [pgx](https://github.com/jackc/pgx) which is actively maintained. diff --git a/vendor/github.com/lib/pq/TESTS.md b/vendor/github.com/lib/pq/TESTS.md new file mode 100644 index 000000000..f05021115 --- /dev/null +++ b/vendor/github.com/lib/pq/TESTS.md @@ -0,0 +1,33 @@ +# Tests + +## Running Tests + +`go test` is used for testing. A running PostgreSQL +server is required, with the ability to log in. The +database to connect to test with is "pqgotest," on +"localhost" but these can be overridden using [environment +variables](https://www.postgresql.org/docs/9.3/static/libpq-envars.html). + +Example: + + PGHOST=/run/postgresql go test + +## Benchmarks + +A benchmark suite can be run as part of the tests: + + go test -bench . 
+ +## Example setup (Docker) + +Run a postgres container: + +``` +docker run --expose 5432:5432 postgres +``` + +Run tests: + +``` +PGHOST=localhost PGPORT=5432 PGUSER=postgres PGSSLMODE=disable PGDATABASE=postgres go test +``` diff --git a/vendor/github.com/lib/pq/array.go b/vendor/github.com/lib/pq/array.go new file mode 100644 index 000000000..405da2368 --- /dev/null +++ b/vendor/github.com/lib/pq/array.go @@ -0,0 +1,895 @@ +package pq + +import ( + "bytes" + "database/sql" + "database/sql/driver" + "encoding/hex" + "fmt" + "reflect" + "strconv" + "strings" +) + +var typeByteSlice = reflect.TypeOf([]byte{}) +var typeDriverValuer = reflect.TypeOf((*driver.Valuer)(nil)).Elem() +var typeSQLScanner = reflect.TypeOf((*sql.Scanner)(nil)).Elem() + +// Array returns the optimal driver.Valuer and sql.Scanner for an array or +// slice of any dimension. +// +// For example: +// db.Query(`SELECT * FROM t WHERE id = ANY($1)`, pq.Array([]int{235, 401})) +// +// var x []sql.NullInt64 +// db.QueryRow('SELECT ARRAY[235, 401]').Scan(pq.Array(&x)) +// +// Scanning multi-dimensional arrays is not supported. Arrays where the lower +// bound is not one (such as `[0:0]={1}') are not supported. +func Array(a interface{}) interface { + driver.Valuer + sql.Scanner +} { + switch a := a.(type) { + case []bool: + return (*BoolArray)(&a) + case []float64: + return (*Float64Array)(&a) + case []float32: + return (*Float32Array)(&a) + case []int64: + return (*Int64Array)(&a) + case []int32: + return (*Int32Array)(&a) + case []string: + return (*StringArray)(&a) + case [][]byte: + return (*ByteaArray)(&a) + + case *[]bool: + return (*BoolArray)(a) + case *[]float64: + return (*Float64Array)(a) + case *[]float32: + return (*Float32Array)(a) + case *[]int64: + return (*Int64Array)(a) + case *[]int32: + return (*Int32Array)(a) + case *[]string: + return (*StringArray)(a) + case *[][]byte: + return (*ByteaArray)(a) + } + + return GenericArray{a} +} + +// ArrayDelimiter may be optionally implemented by driver.Valuer or sql.Scanner +// to override the array delimiter used by GenericArray. +type ArrayDelimiter interface { + // ArrayDelimiter returns the delimiter character(s) for this element's type. + ArrayDelimiter() string +} + +// BoolArray represents a one-dimensional array of the PostgreSQL boolean type. +type BoolArray []bool + +// Scan implements the sql.Scanner interface. +func (a *BoolArray) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + return a.scanBytes(src) + case string: + return a.scanBytes([]byte(src)) + case nil: + *a = nil + return nil + } + + return fmt.Errorf("pq: cannot convert %T to BoolArray", src) +} + +func (a *BoolArray) scanBytes(src []byte) error { + elems, err := scanLinearArray(src, []byte{','}, "BoolArray") + if err != nil { + return err + } + if *a != nil && len(elems) == 0 { + *a = (*a)[:0] + } else { + b := make(BoolArray, len(elems)) + for i, v := range elems { + if len(v) != 1 { + return fmt.Errorf("pq: could not parse boolean array index %d: invalid boolean %q", i, v) + } + switch v[0] { + case 't': + b[i] = true + case 'f': + b[i] = false + default: + return fmt.Errorf("pq: could not parse boolean array index %d: invalid boolean %q", i, v) + } + } + *a = b + } + return nil +} + +// Value implements the driver.Valuer interface. +func (a BoolArray) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be exactly two curly brackets, N bytes of values, + // and N-1 bytes of delimiters. 
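	// For example (illustrative): n = 3 encodes as {t,f,t}, which is
	// 2 + 3 + 2 = 7 bytes, matching the 1+2*n allocation below.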
+ b := make([]byte, 1+2*n) + + for i := 0; i < n; i++ { + b[2*i] = ',' + if a[i] { + b[1+2*i] = 't' + } else { + b[1+2*i] = 'f' + } + } + + b[0] = '{' + b[2*n] = '}' + + return string(b), nil + } + + return "{}", nil +} + +// ByteaArray represents a one-dimensional array of the PostgreSQL bytea type. +type ByteaArray [][]byte + +// Scan implements the sql.Scanner interface. +func (a *ByteaArray) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + return a.scanBytes(src) + case string: + return a.scanBytes([]byte(src)) + case nil: + *a = nil + return nil + } + + return fmt.Errorf("pq: cannot convert %T to ByteaArray", src) +} + +func (a *ByteaArray) scanBytes(src []byte) error { + elems, err := scanLinearArray(src, []byte{','}, "ByteaArray") + if err != nil { + return err + } + if *a != nil && len(elems) == 0 { + *a = (*a)[:0] + } else { + b := make(ByteaArray, len(elems)) + for i, v := range elems { + b[i], err = parseBytea(v) + if err != nil { + return fmt.Errorf("could not parse bytea array index %d: %s", i, err.Error()) + } + } + *a = b + } + return nil +} + +// Value implements the driver.Valuer interface. It uses the "hex" format which +// is only supported on PostgreSQL 9.0 or newer. +func (a ByteaArray) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be at least two curly brackets, 2*N bytes of quotes, + // 3*N bytes of hex formatting, and N-1 bytes of delimiters. + size := 1 + 6*n + for _, x := range a { + size += hex.EncodedLen(len(x)) + } + + b := make([]byte, size) + + for i, s := 0, b; i < n; i++ { + o := copy(s, `,"\\x`) + o += hex.Encode(s[o:], a[i]) + s[o] = '"' + s = s[o+1:] + } + + b[0] = '{' + b[size-1] = '}' + + return string(b), nil + } + + return "{}", nil +} + +// Float64Array represents a one-dimensional array of the PostgreSQL double +// precision type. +type Float64Array []float64 + +// Scan implements the sql.Scanner interface. +func (a *Float64Array) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + return a.scanBytes(src) + case string: + return a.scanBytes([]byte(src)) + case nil: + *a = nil + return nil + } + + return fmt.Errorf("pq: cannot convert %T to Float64Array", src) +} + +func (a *Float64Array) scanBytes(src []byte) error { + elems, err := scanLinearArray(src, []byte{','}, "Float64Array") + if err != nil { + return err + } + if *a != nil && len(elems) == 0 { + *a = (*a)[:0] + } else { + b := make(Float64Array, len(elems)) + for i, v := range elems { + if b[i], err = strconv.ParseFloat(string(v), 64); err != nil { + return fmt.Errorf("pq: parsing array element index %d: %v", i, err) + } + } + *a = b + } + return nil +} + +// Value implements the driver.Valuer interface. +func (a Float64Array) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be at least two curly brackets, N bytes of values, + // and N-1 bytes of delimiters. + b := make([]byte, 1, 1+2*n) + b[0] = '{' + + b = strconv.AppendFloat(b, a[0], 'f', -1, 64) + for i := 1; i < n; i++ { + b = append(b, ',') + b = strconv.AppendFloat(b, a[i], 'f', -1, 64) + } + + return string(append(b, '}')), nil + } + + return "{}", nil +} + +// Float32Array represents a one-dimensional array of the PostgreSQL double +// precision type. +type Float32Array []float32 + +// Scan implements the sql.Scanner interface. 
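//
// A hedged usage sketch (query and variable names are illustrative, not taken
// from this package):
//
//	var reals []float32
//	err := db.QueryRow(`SELECT ARRAY[1.5, 2.5]::real[]`).Scan(pq.Array(&reals))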
+func (a *Float32Array) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + return a.scanBytes(src) + case string: + return a.scanBytes([]byte(src)) + case nil: + *a = nil + return nil + } + + return fmt.Errorf("pq: cannot convert %T to Float32Array", src) +} + +func (a *Float32Array) scanBytes(src []byte) error { + elems, err := scanLinearArray(src, []byte{','}, "Float32Array") + if err != nil { + return err + } + if *a != nil && len(elems) == 0 { + *a = (*a)[:0] + } else { + b := make(Float32Array, len(elems)) + for i, v := range elems { + var x float64 + if x, err = strconv.ParseFloat(string(v), 32); err != nil { + return fmt.Errorf("pq: parsing array element index %d: %v", i, err) + } + b[i] = float32(x) + } + *a = b + } + return nil +} + +// Value implements the driver.Valuer interface. +func (a Float32Array) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be at least two curly brackets, N bytes of values, + // and N-1 bytes of delimiters. + b := make([]byte, 1, 1+2*n) + b[0] = '{' + + b = strconv.AppendFloat(b, float64(a[0]), 'f', -1, 32) + for i := 1; i < n; i++ { + b = append(b, ',') + b = strconv.AppendFloat(b, float64(a[i]), 'f', -1, 32) + } + + return string(append(b, '}')), nil + } + + return "{}", nil +} + +// GenericArray implements the driver.Valuer and sql.Scanner interfaces for +// an array or slice of any dimension. +type GenericArray struct{ A interface{} } + +func (GenericArray) evaluateDestination(rt reflect.Type) (reflect.Type, func([]byte, reflect.Value) error, string) { + var assign func([]byte, reflect.Value) error + var del = "," + + // TODO calculate the assign function for other types + // TODO repeat this section on the element type of arrays or slices (multidimensional) + { + if reflect.PtrTo(rt).Implements(typeSQLScanner) { + // dest is always addressable because it is an element of a slice. + assign = func(src []byte, dest reflect.Value) (err error) { + ss := dest.Addr().Interface().(sql.Scanner) + if src == nil { + err = ss.Scan(nil) + } else { + err = ss.Scan(src) + } + return + } + goto FoundType + } + + assign = func([]byte, reflect.Value) error { + return fmt.Errorf("pq: scanning to %s is not implemented; only sql.Scanner", rt) + } + } + +FoundType: + + if ad, ok := reflect.Zero(rt).Interface().(ArrayDelimiter); ok { + del = ad.ArrayDelimiter() + } + + return rt, assign, del +} + +// Scan implements the sql.Scanner interface. 
+func (a GenericArray) Scan(src interface{}) error { + dpv := reflect.ValueOf(a.A) + switch { + case dpv.Kind() != reflect.Ptr: + return fmt.Errorf("pq: destination %T is not a pointer to array or slice", a.A) + case dpv.IsNil(): + return fmt.Errorf("pq: destination %T is nil", a.A) + } + + dv := dpv.Elem() + switch dv.Kind() { + case reflect.Slice: + case reflect.Array: + default: + return fmt.Errorf("pq: destination %T is not a pointer to array or slice", a.A) + } + + switch src := src.(type) { + case []byte: + return a.scanBytes(src, dv) + case string: + return a.scanBytes([]byte(src), dv) + case nil: + if dv.Kind() == reflect.Slice { + dv.Set(reflect.Zero(dv.Type())) + return nil + } + } + + return fmt.Errorf("pq: cannot convert %T to %s", src, dv.Type()) +} + +func (a GenericArray) scanBytes(src []byte, dv reflect.Value) error { + dtype, assign, del := a.evaluateDestination(dv.Type().Elem()) + dims, elems, err := parseArray(src, []byte(del)) + if err != nil { + return err + } + + // TODO allow multidimensional + + if len(dims) > 1 { + return fmt.Errorf("pq: scanning from multidimensional ARRAY%s is not implemented", + strings.Replace(fmt.Sprint(dims), " ", "][", -1)) + } + + // Treat a zero-dimensional array like an array with a single dimension of zero. + if len(dims) == 0 { + dims = append(dims, 0) + } + + for i, rt := 0, dv.Type(); i < len(dims); i, rt = i+1, rt.Elem() { + switch rt.Kind() { + case reflect.Slice: + case reflect.Array: + if rt.Len() != dims[i] { + return fmt.Errorf("pq: cannot convert ARRAY%s to %s", + strings.Replace(fmt.Sprint(dims), " ", "][", -1), dv.Type()) + } + default: + // TODO handle multidimensional + } + } + + values := reflect.MakeSlice(reflect.SliceOf(dtype), len(elems), len(elems)) + for i, e := range elems { + if err := assign(e, values.Index(i)); err != nil { + return fmt.Errorf("pq: parsing array element index %d: %v", i, err) + } + } + + // TODO handle multidimensional + + switch dv.Kind() { + case reflect.Slice: + dv.Set(values.Slice(0, dims[0])) + case reflect.Array: + for i := 0; i < dims[0]; i++ { + dv.Index(i).Set(values.Index(i)) + } + } + + return nil +} + +// Value implements the driver.Valuer interface. +func (a GenericArray) Value() (driver.Value, error) { + if a.A == nil { + return nil, nil + } + + rv := reflect.ValueOf(a.A) + + switch rv.Kind() { + case reflect.Slice: + if rv.IsNil() { + return nil, nil + } + case reflect.Array: + default: + return nil, fmt.Errorf("pq: Unable to convert %T to array", a.A) + } + + if n := rv.Len(); n > 0 { + // There will be at least two curly brackets, N bytes of values, + // and N-1 bytes of delimiters. + b := make([]byte, 0, 1+2*n) + + b, _, err := appendArray(b, rv, n) + return string(b), err + } + + return "{}", nil +} + +// Int64Array represents a one-dimensional array of the PostgreSQL integer types. +type Int64Array []int64 + +// Scan implements the sql.Scanner interface. 
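//
// A hedged usage sketch covering both directions (table and column names are
// illustrative):
//
//	_, err := db.Exec(`UPDATE t SET ids = $1 WHERE id = 1`, pq.Array([]int64{1, 2, 3}))
//
//	var ids []int64
//	err = db.QueryRow(`SELECT ids FROM t WHERE id = 1`).Scan(pq.Array(&ids))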
+func (a *Int64Array) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + return a.scanBytes(src) + case string: + return a.scanBytes([]byte(src)) + case nil: + *a = nil + return nil + } + + return fmt.Errorf("pq: cannot convert %T to Int64Array", src) +} + +func (a *Int64Array) scanBytes(src []byte) error { + elems, err := scanLinearArray(src, []byte{','}, "Int64Array") + if err != nil { + return err + } + if *a != nil && len(elems) == 0 { + *a = (*a)[:0] + } else { + b := make(Int64Array, len(elems)) + for i, v := range elems { + if b[i], err = strconv.ParseInt(string(v), 10, 64); err != nil { + return fmt.Errorf("pq: parsing array element index %d: %v", i, err) + } + } + *a = b + } + return nil +} + +// Value implements the driver.Valuer interface. +func (a Int64Array) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be at least two curly brackets, N bytes of values, + // and N-1 bytes of delimiters. + b := make([]byte, 1, 1+2*n) + b[0] = '{' + + b = strconv.AppendInt(b, a[0], 10) + for i := 1; i < n; i++ { + b = append(b, ',') + b = strconv.AppendInt(b, a[i], 10) + } + + return string(append(b, '}')), nil + } + + return "{}", nil +} + +// Int32Array represents a one-dimensional array of the PostgreSQL integer types. +type Int32Array []int32 + +// Scan implements the sql.Scanner interface. +func (a *Int32Array) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + return a.scanBytes(src) + case string: + return a.scanBytes([]byte(src)) + case nil: + *a = nil + return nil + } + + return fmt.Errorf("pq: cannot convert %T to Int32Array", src) +} + +func (a *Int32Array) scanBytes(src []byte) error { + elems, err := scanLinearArray(src, []byte{','}, "Int32Array") + if err != nil { + return err + } + if *a != nil && len(elems) == 0 { + *a = (*a)[:0] + } else { + b := make(Int32Array, len(elems)) + for i, v := range elems { + var x int + if x, err = strconv.Atoi(string(v)); err != nil { + return fmt.Errorf("pq: parsing array element index %d: %v", i, err) + } + b[i] = int32(x) + } + *a = b + } + return nil +} + +// Value implements the driver.Valuer interface. +func (a Int32Array) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be at least two curly brackets, N bytes of values, + // and N-1 bytes of delimiters. + b := make([]byte, 1, 1+2*n) + b[0] = '{' + + b = strconv.AppendInt(b, int64(a[0]), 10) + for i := 1; i < n; i++ { + b = append(b, ',') + b = strconv.AppendInt(b, int64(a[i]), 10) + } + + return string(append(b, '}')), nil + } + + return "{}", nil +} + +// StringArray represents a one-dimensional array of the PostgreSQL character types. +type StringArray []string + +// Scan implements the sql.Scanner interface. 
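//
// A hedged usage sketch (names are illustrative):
//
//	var tags []string
//	err := db.QueryRow(`SELECT ARRAY['a','b','c']::text[]`).Scan(pq.Array(&tags))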
+func (a *StringArray) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + return a.scanBytes(src) + case string: + return a.scanBytes([]byte(src)) + case nil: + *a = nil + return nil + } + + return fmt.Errorf("pq: cannot convert %T to StringArray", src) +} + +func (a *StringArray) scanBytes(src []byte) error { + elems, err := scanLinearArray(src, []byte{','}, "StringArray") + if err != nil { + return err + } + if *a != nil && len(elems) == 0 { + *a = (*a)[:0] + } else { + b := make(StringArray, len(elems)) + for i, v := range elems { + if b[i] = string(v); v == nil { + return fmt.Errorf("pq: parsing array element index %d: cannot convert nil to string", i) + } + } + *a = b + } + return nil +} + +// Value implements the driver.Valuer interface. +func (a StringArray) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be at least two curly brackets, 2*N bytes of quotes, + // and N-1 bytes of delimiters. + b := make([]byte, 1, 1+3*n) + b[0] = '{' + + b = appendArrayQuotedBytes(b, []byte(a[0])) + for i := 1; i < n; i++ { + b = append(b, ',') + b = appendArrayQuotedBytes(b, []byte(a[i])) + } + + return string(append(b, '}')), nil + } + + return "{}", nil +} + +// appendArray appends rv to the buffer, returning the extended buffer and +// the delimiter used between elements. +// +// It panics when n <= 0 or rv's Kind is not reflect.Array nor reflect.Slice. +func appendArray(b []byte, rv reflect.Value, n int) ([]byte, string, error) { + var del string + var err error + + b = append(b, '{') + + if b, del, err = appendArrayElement(b, rv.Index(0)); err != nil { + return b, del, err + } + + for i := 1; i < n; i++ { + b = append(b, del...) + if b, del, err = appendArrayElement(b, rv.Index(i)); err != nil { + return b, del, err + } + } + + return append(b, '}'), del, nil +} + +// appendArrayElement appends rv to the buffer, returning the extended buffer +// and the delimiter to use before the next element. +// +// When rv's Kind is neither reflect.Array nor reflect.Slice, it is converted +// using driver.DefaultParameterConverter and the resulting []byte or string +// is double-quoted. +// +// See http://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO +func appendArrayElement(b []byte, rv reflect.Value) ([]byte, string, error) { + if k := rv.Kind(); k == reflect.Array || k == reflect.Slice { + if t := rv.Type(); t != typeByteSlice && !t.Implements(typeDriverValuer) { + if n := rv.Len(); n > 0 { + return appendArray(b, rv, n) + } + + return b, "", nil + } + } + + var del = "," + var err error + var iv interface{} = rv.Interface() + + if ad, ok := iv.(ArrayDelimiter); ok { + del = ad.ArrayDelimiter() + } + + if iv, err = driver.DefaultParameterConverter.ConvertValue(iv); err != nil { + return b, del, err + } + + switch v := iv.(type) { + case nil: + return append(b, "NULL"...), del, nil + case []byte: + return appendArrayQuotedBytes(b, v), del, nil + case string: + return appendArrayQuotedBytes(b, []byte(v)), del, nil + } + + b, err = appendValue(b, iv) + return b, del, err +} + +func appendArrayQuotedBytes(b, v []byte) []byte { + b = append(b, '"') + for { + i := bytes.IndexAny(v, `"\`) + if i < 0 { + b = append(b, v...) + break + } + if i > 0 { + b = append(b, v[:i]...) 
+ } + b = append(b, '\\', v[i]) + v = v[i+1:] + } + return append(b, '"') +} + +func appendValue(b []byte, v driver.Value) ([]byte, error) { + return append(b, encode(nil, v, 0)...), nil +} + +// parseArray extracts the dimensions and elements of an array represented in +// text format. Only representations emitted by the backend are supported. +// Notably, whitespace around brackets and delimiters is significant, and NULL +// is case-sensitive. +// +// See http://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO +func parseArray(src, del []byte) (dims []int, elems [][]byte, err error) { + var depth, i int + + if len(src) < 1 || src[0] != '{' { + return nil, nil, fmt.Errorf("pq: unable to parse array; expected %q at offset %d", '{', 0) + } + +Open: + for i < len(src) { + switch src[i] { + case '{': + depth++ + i++ + case '}': + elems = make([][]byte, 0) + goto Close + default: + break Open + } + } + dims = make([]int, i) + +Element: + for i < len(src) { + switch src[i] { + case '{': + if depth == len(dims) { + break Element + } + depth++ + dims[depth-1] = 0 + i++ + case '"': + var elem = []byte{} + var escape bool + for i++; i < len(src); i++ { + if escape { + elem = append(elem, src[i]) + escape = false + } else { + switch src[i] { + default: + elem = append(elem, src[i]) + case '\\': + escape = true + case '"': + elems = append(elems, elem) + i++ + break Element + } + } + } + default: + for start := i; i < len(src); i++ { + if bytes.HasPrefix(src[i:], del) || src[i] == '}' { + elem := src[start:i] + if len(elem) == 0 { + return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i) + } + if bytes.Equal(elem, []byte("NULL")) { + elem = nil + } + elems = append(elems, elem) + break Element + } + } + } + } + + for i < len(src) { + if bytes.HasPrefix(src[i:], del) && depth > 0 { + dims[depth-1]++ + i += len(del) + goto Element + } else if src[i] == '}' && depth > 0 { + dims[depth-1]++ + depth-- + i++ + } else { + return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i) + } + } + +Close: + for i < len(src) { + if src[i] == '}' && depth > 0 { + depth-- + i++ + } else { + return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i) + } + } + if depth > 0 { + err = fmt.Errorf("pq: unable to parse array; expected %q at offset %d", '}', i) + } + if err == nil { + for _, d := range dims { + if (len(elems) % d) != 0 { + err = fmt.Errorf("pq: multidimensional arrays must have elements with matching dimensions") + } + } + } + return +} + +func scanLinearArray(src, del []byte, typ string) (elems [][]byte, err error) { + dims, elems, err := parseArray(src, del) + if err != nil { + return nil, err + } + if len(dims) > 1 { + return nil, fmt.Errorf("pq: cannot convert ARRAY%s to %s", strings.Replace(fmt.Sprint(dims), " ", "][", -1), typ) + } + return elems, err +} diff --git a/vendor/github.com/lib/pq/buf.go b/vendor/github.com/lib/pq/buf.go index 666b0012a..4b0a0a8f7 100644 --- a/vendor/github.com/lib/pq/buf.go +++ b/vendor/github.com/lib/pq/buf.go @@ -66,7 +66,7 @@ func (b *writeBuf) int16(n int) { } func (b *writeBuf) string(s string) { - b.buf = append(b.buf, (s + "\000")...) 
+ b.buf = append(append(b.buf, s...), '\000') } func (b *writeBuf) byte(c byte) { diff --git a/vendor/github.com/lib/pq/conn.go b/vendor/github.com/lib/pq/conn.go index a5f1eab9f..db0b6cef5 100644 --- a/vendor/github.com/lib/pq/conn.go +++ b/vendor/github.com/lib/pq/conn.go @@ -2,16 +2,15 @@ package pq import ( "bufio" + "context" "crypto/md5" - "crypto/tls" - "crypto/x509" + "crypto/sha256" "database/sql" "database/sql/driver" "encoding/binary" "errors" "fmt" "io" - "io/ioutil" "net" "os" "os/user" @@ -19,10 +18,12 @@ import ( "path/filepath" "strconv" "strings" + "sync/atomic" "time" "unicode" "github.com/lib/pq/oid" + "github.com/lib/pq/scram" ) // Common error types @@ -30,18 +31,31 @@ var ( ErrNotSupported = errors.New("pq: Unsupported command") ErrInFailedTransaction = errors.New("pq: Could not complete operation in a failed transaction") ErrSSLNotSupported = errors.New("pq: SSL is not enabled on the server") - ErrSSLKeyHasWorldPermissions = errors.New("pq: Private key file has group or world access. Permissions should be u=rw (0600) or less.") - ErrCouldNotDetectUsername = errors.New("pq: Could not detect default username. Please provide one explicitly.") + ErrSSLKeyHasWorldPermissions = errors.New("pq: Private key file has group or world access. Permissions should be u=rw (0600) or less") + ErrCouldNotDetectUsername = errors.New("pq: Could not detect default username. Please provide one explicitly") + + errUnexpectedReady = errors.New("unexpected ReadyForQuery") + errNoRowsAffected = errors.New("no RowsAffected available after the empty statement") + errNoLastInsertID = errors.New("no LastInsertId available after the empty statement") ) -type drv struct{} +// Compile time validation that our types implement the expected interfaces +var ( + _ driver.Driver = Driver{} +) -func (d *drv) Open(name string) (driver.Conn, error) { +// Driver is the Postgres database driver. +type Driver struct{} + +// Open opens a new connection to the database. name is a connection string. +// Most users should only use it through database/sql package from the standard +// library. +func (d Driver) Open(name string) (driver.Conn, error) { return Open(name) } func init() { - sql.Register("postgres", &drv{}) + sql.Register("postgres", &Driver{}) } type parameterStatus struct { @@ -77,18 +91,32 @@ func (s transactionStatus) String() string { panic("not reached") } +// Dialer is the dialer interface. It can be used to obtain more control over +// how pq creates network connections. type Dialer interface { Dial(network, address string) (net.Conn, error) DialTimeout(network, address string, timeout time.Duration) (net.Conn, error) } -type defaultDialer struct{} +// DialerContext is the context-aware dialer interface. 
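//
// A hedged sketch of a custom dialer; myDialer is hypothetical and simply
// wraps net.Dialer. DialOpen accepts any Dialer, and DialContext is used when
// the value also implements DialerContext:
//
//	type myDialer struct{ d net.Dialer }
//
//	func (m myDialer) Dial(network, address string) (net.Conn, error) {
//		return m.d.Dial(network, address)
//	}
//
//	func (m myDialer) DialTimeout(network, address string, timeout time.Duration) (net.Conn, error) {
//		ctx, cancel := context.WithTimeout(context.Background(), timeout)
//		defer cancel()
//		return m.d.DialContext(ctx, network, address)
//	}
//
//	func (m myDialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) {
//		return m.d.DialContext(ctx, network, address)
//	}
//
//	cn, err := pq.DialOpen(myDialer{}, "host=127.0.0.1 dbname=pqgotest sslmode=disable")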
+type DialerContext interface { + DialContext(ctx context.Context, network, address string) (net.Conn, error) +} + +type defaultDialer struct { + d net.Dialer +} -func (d defaultDialer) Dial(ntw, addr string) (net.Conn, error) { - return net.Dial(ntw, addr) +func (d defaultDialer) Dial(network, address string) (net.Conn, error) { + return d.d.Dial(network, address) +} +func (d defaultDialer) DialTimeout(network, address string, timeout time.Duration) (net.Conn, error) { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + return d.DialContext(ctx, network, address) } -func (d defaultDialer) DialTimeout(ntw, addr string, timeout time.Duration) (net.Conn, error) { - return net.DialTimeout(ntw, addr, timeout) +func (d defaultDialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) { + return d.d.DialContext(ctx, network, address) } type conn struct { @@ -97,6 +125,15 @@ type conn struct { namei int scratch [512]byte txnStatus transactionStatus + txnFinish func() + + // Save connection arguments to use during CancelRequest. + dialer Dialer + opts values + + // Cancellation key data for use with CancelRequest messages. + processID int + secretKey int parameterStatus parameterStatus @@ -105,7 +142,7 @@ type conn struct { // If true, this connection is bad and all public-facing functions should // return ErrBadConn. - bad bool + bad *atomic.Value // If set, this connection should never use the binary format when // receiving query results from prepared statements. Only provided for @@ -115,12 +152,24 @@ type conn struct { // Whether to always send []byte parameters over as binary. Enables single // round-trip mode for non-prepared Query calls. binaryParameters bool + + // If true this connection is in the middle of a COPY + inCopy bool + + // If not nil, notices will be synchronously sent here + noticeHandler func(*Error) + + // If not nil, notifications will be synchronously sent here + notificationHandler func(*Notification) + + // GSSAPI context + gss GSS } // Handle driver-side settings in parsed connection string. 
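//
// A hedged example of the two driver-side settings handled below; both accept
// "yes" or "no" in the connection string (the other parameters are placeholders):
//
//	db, err := sql.Open("postgres",
//		"host=localhost dbname=pqgotest sslmode=disable binary_parameters=yes disable_prepared_binary_result=no")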
-func (c *conn) handleDriverSettings(o values) (err error) { +func (cn *conn) handleDriverSettings(o values) (err error) { boolSetting := func(key string, val *bool) error { - if value := o.Get(key); value != "" { + if value, ok := o[key]; ok { if value == "yes" { *val = true } else if value == "no" { @@ -132,179 +181,219 @@ func (c *conn) handleDriverSettings(o values) (err error) { return nil } - err = boolSetting("disable_prepared_binary_result", &c.disablePreparedBinaryResult) + err = boolSetting("disable_prepared_binary_result", &cn.disablePreparedBinaryResult) if err != nil { return err } - err = boolSetting("binary_parameters", &c.binaryParameters) + return boolSetting("binary_parameters", &cn.binaryParameters) +} + +func (cn *conn) handlePgpass(o values) { + // if a password was supplied, do not process .pgpass + if _, ok := o["password"]; ok { + return + } + filename := os.Getenv("PGPASSFILE") + if filename == "" { + // XXX this code doesn't work on Windows where the default filename is + // XXX %APPDATA%\postgresql\pgpass.conf + // Prefer $HOME over user.Current due to glibc bug: golang.org/issue/13470 + userHome := os.Getenv("HOME") + if userHome == "" { + user, err := user.Current() + if err != nil { + return + } + userHome = user.HomeDir + } + filename = filepath.Join(userHome, ".pgpass") + } + fileinfo, err := os.Stat(filename) if err != nil { - return err + return + } + mode := fileinfo.Mode() + if mode&(0x77) != 0 { + // XXX should warn about incorrect .pgpass permissions as psql does + return + } + file, err := os.Open(filename) + if err != nil { + return + } + defer file.Close() + scanner := bufio.NewScanner(io.Reader(file)) + hostname := o["host"] + ntw, _ := network(o) + port := o["port"] + db := o["dbname"] + username := o["user"] + // From: https://github.com/tg/pgpass/blob/master/reader.go + getFields := func(s string) []string { + fs := make([]string, 0, 5) + f := make([]rune, 0, len(s)) + + var esc bool + for _, c := range s { + switch { + case esc: + f = append(f, c) + esc = false + case c == '\\': + esc = true + case c == ':': + fs = append(fs, string(f)) + f = f[:0] + default: + f = append(f, c) + } + } + return append(fs, string(f)) + } + for scanner.Scan() { + line := scanner.Text() + if len(line) == 0 || line[0] == '#' { + continue + } + split := getFields(line) + if len(split) != 5 { + continue + } + if (split[0] == "*" || split[0] == hostname || (split[0] == "localhost" && (hostname == "" || ntw == "unix"))) && (split[1] == "*" || split[1] == port) && (split[2] == "*" || split[2] == db) && (split[3] == "*" || split[3] == username) { + o["password"] = split[4] + return + } } - return nil } -func (c *conn) writeBuf(b byte) *writeBuf { - c.scratch[0] = b +func (cn *conn) writeBuf(b byte) *writeBuf { + cn.scratch[0] = b return &writeBuf{ - buf: c.scratch[:5], + buf: cn.scratch[:5], pos: 1, } } -func Open(name string) (_ driver.Conn, err error) { - return DialOpen(defaultDialer{}, name) +// Open opens a new connection to the database. dsn is a connection string. +// Most users should only use it through database/sql package from the standard +// library. +func Open(dsn string) (_ driver.Conn, err error) { + return DialOpen(defaultDialer{}, dsn) +} + +// DialOpen opens a new connection to the database using a dialer. 
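//
// A hedged sketch of the connector-based alternative: NewConnector parses the
// same connection string, and the result can be handed to database/sql directly.
//
//	connector, err := pq.NewConnector("host=localhost dbname=pqgotest sslmode=disable")
//	if err != nil {
//		// handle error
//	}
//	db := sql.OpenDB(connector)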
+func DialOpen(d Dialer, dsn string) (_ driver.Conn, err error) { + c, err := NewConnector(dsn) + if err != nil { + return nil, err + } + c.dialer = d + return c.open(context.Background()) } -func DialOpen(d Dialer, name string) (_ driver.Conn, err error) { +func (c *Connector) open(ctx context.Context) (cn *conn, err error) { // Handle any panics during connection initialization. Note that we // specifically do *not* want to use errRecover(), as that would turn any // connection errors into ErrBadConns, hiding the real error message from // the user. defer errRecoverNoErrBadConn(&err) - o := make(values) + o := c.opts - // A number of defaults are applied here, in this order: - // - // * Very low precedence defaults applied in every situation - // * Environment variables - // * Explicitly passed connection information - o.Set("host", "localhost") - o.Set("port", "5432") - // N.B.: Extra float digits should be set to 3, but that breaks - // Postgres 8.4 and older, where the max is 2. - o.Set("extra_float_digits", "2") - for k, v := range parseEnviron(os.Environ()) { - o.Set(k, v) - } - - if strings.HasPrefix(name, "postgres://") || strings.HasPrefix(name, "postgresql://") { - name, err = ParseURL(name) - if err != nil { - return nil, err - } + bad := &atomic.Value{} + bad.Store(false) + cn = &conn{ + opts: o, + dialer: c.dialer, + bad: bad, } - - if err := parseOpts(name, o); err != nil { + err = cn.handleDriverSettings(o) + if err != nil { return nil, err } + cn.handlePgpass(o) - // Use the "fallback" application name if necessary - if fallback := o.Get("fallback_application_name"); fallback != "" { - if !o.Isset("application_name") { - o.Set("application_name", fallback) - } - } - - // We can't work with any client_encoding other than UTF-8 currently. - // However, we have historically allowed the user to set it to UTF-8 - // explicitly, and there's no reason to break such programs, so allow that. - // Note that the "options" setting could also set client_encoding, but - // parsing its value is not worth it. Instead, we always explicitly send - // client_encoding as a separate run-time parameter, which should override - // anything set in options. - if enc := o.Get("client_encoding"); enc != "" && !isUTF8(enc) { - return nil, errors.New("client_encoding must be absent or 'UTF8'") - } - o.Set("client_encoding", "UTF8") - // DateStyle needs a similar treatment. - if datestyle := o.Get("datestyle"); datestyle != "" { - if datestyle != "ISO, MDY" { - panic(fmt.Sprintf("setting datestyle must be absent or %v; got %v", - "ISO, MDY", datestyle)) - } - } else { - o.Set("datestyle", "ISO, MDY") - } - - // If a user is not provided by any other means, the last - // resort is to use the current operating system provided user - // name. - if o.Get("user") == "" { - u, err := userCurrent() - if err != nil { - return nil, err - } else { - o.Set("user", u) - } - } - - cn := &conn{} - err = cn.handleDriverSettings(o) + cn.c, err = dial(ctx, c.dialer, o) if err != nil { return nil, err } - cn.c, err = dial(d, o) + err = cn.ssl(o) if err != nil { + if cn.c != nil { + cn.c.Close() + } return nil, err } - cn.ssl(o) + + // cn.startup panics on error. Make sure we don't leak cn.c. 
+ panicking := true + defer func() { + if panicking { + cn.c.Close() + } + }() + cn.buf = bufio.NewReader(cn.c) cn.startup(o) // reset the deadline, in case one was set (see dial) - if timeout := o.Get("connect_timeout"); timeout != "" && timeout != "0" { + if timeout, ok := o["connect_timeout"]; ok && timeout != "0" { err = cn.c.SetDeadline(time.Time{}) } + panicking = false return cn, err } -func dial(d Dialer, o values) (net.Conn, error) { - ntw, addr := network(o) - // SSL is not necessary or supported over UNIX domain sockets - if ntw == "unix" { - o["sslmode"] = "disable" - } +func dial(ctx context.Context, d Dialer, o values) (net.Conn, error) { + network, address := network(o) // Zero or not specified means wait indefinitely. - if timeout := o.Get("connect_timeout"); timeout != "" && timeout != "0" { + if timeout, ok := o["connect_timeout"]; ok && timeout != "0" { seconds, err := strconv.ParseInt(timeout, 10, 0) if err != nil { return nil, fmt.Errorf("invalid value for parameter connect_timeout: %s", err) } duration := time.Duration(seconds) * time.Second + // connect_timeout should apply to the entire connection establishment // procedure, so we both use a timeout for the TCP connection // establishment and set a deadline for doing the initial handshake. // The deadline is then reset after startup() is done. deadline := time.Now().Add(duration) - conn, err := d.DialTimeout(ntw, addr, duration) + var conn net.Conn + if dctx, ok := d.(DialerContext); ok { + ctx, cancel := context.WithTimeout(ctx, duration) + defer cancel() + conn, err = dctx.DialContext(ctx, network, address) + } else { + conn, err = d.DialTimeout(network, address, duration) + } if err != nil { return nil, err } err = conn.SetDeadline(deadline) return conn, err } - return d.Dial(ntw, addr) + if dctx, ok := d.(DialerContext); ok { + return dctx.DialContext(ctx, network, address) + } + return d.Dial(network, address) } func network(o values) (string, string) { - host := o.Get("host") + host := o["host"] if strings.HasPrefix(host, "/") { - sockPath := path.Join(host, ".s.PGSQL."+o.Get("port")) + sockPath := path.Join(host, ".s.PGSQL."+o["port"]) return "unix", sockPath } - return "tcp", host + ":" + o.Get("port") + return "tcp", net.JoinHostPort(host, o["port"]) } type values map[string]string -func (vs values) Set(k, v string) { - vs[k] = v -} - -func (vs values) Get(k string) (v string) { - return vs[k] -} - -func (vs values) Isset(k string) bool { - _, ok := vs[k] - return ok -} - // scanner implements a tokenizer for libpq-style option strings. type scanner struct { s []rune @@ -375,7 +464,7 @@ func parseOpts(name string, o values) error { // Skip any whitespace after the = if r, ok = s.SkipSpaces(); !ok { // If we reach the end here, the last value is just an empty string as per libpq. 
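			// For example (illustrative): in "user=postgres password=" the
			// trailing value is stored as the empty string.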
- o.Set(string(keyRunes), "") + o[string(keyRunes)] = "" break } @@ -410,7 +499,7 @@ func parseOpts(name string, o values) error { } } - o.Set(string(keyRunes), string(valRunes)) + o[string(keyRunes)] = string(valRunes) } return nil @@ -421,37 +510,61 @@ func (cn *conn) isInTransaction() bool { cn.txnStatus == txnStatusInFailedTransaction } +func (cn *conn) setBad() { + if cn.bad != nil { + cn.bad.Store(true) + } +} + +func (cn *conn) getBad() bool { + if cn.bad != nil { + return cn.bad.Load().(bool) + } + return false +} + func (cn *conn) checkIsInTransaction(intxn bool) { if cn.isInTransaction() != intxn { - cn.bad = true + cn.setBad() errorf("unexpected transaction status %v", cn.txnStatus) } } func (cn *conn) Begin() (_ driver.Tx, err error) { - if cn.bad { + return cn.begin("") +} + +func (cn *conn) begin(mode string) (_ driver.Tx, err error) { + if cn.getBad() { return nil, driver.ErrBadConn } defer cn.errRecover(&err) cn.checkIsInTransaction(false) - _, commandTag, err := cn.simpleExec("BEGIN") + _, commandTag, err := cn.simpleExec("BEGIN" + mode) if err != nil { return nil, err } if commandTag != "BEGIN" { - cn.bad = true + cn.setBad() return nil, fmt.Errorf("unexpected command tag %s", commandTag) } if cn.txnStatus != txnStatusIdleInTransaction { - cn.bad = true + cn.setBad() return nil, fmt.Errorf("unexpected transaction status %v", cn.txnStatus) } return cn, nil } +func (cn *conn) closeTxn() { + if finish := cn.txnFinish; finish != nil { + finish() + } +} + func (cn *conn) Commit() (err error) { - if cn.bad { + defer cn.closeTxn() + if cn.getBad() { return driver.ErrBadConn } defer cn.errRecover(&err) @@ -464,7 +577,7 @@ func (cn *conn) Commit() (err error) { // would get the same behaviour if you issued a COMMIT in a failed // transaction, so it's also the least surprising thing to do here. 
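	// A hedged caller-side sketch of that behaviour (table and statement are
	// illustrative):
	//
	//	tx, _ := db.Begin()
	//	_, _ = tx.Exec(`INSERT INTO missing_table VALUES (1)`) // fails, txn now aborted
	//	if err := tx.Commit(); err == pq.ErrInFailedTransaction {
	//		// the driver issued ROLLBACK instead of COMMIT
	//	}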
if cn.txnStatus == txnStatusInFailedTransaction { - if err := cn.Rollback(); err != nil { + if err := cn.rollback(); err != nil { return err } return ErrInFailedTransaction @@ -473,12 +586,12 @@ func (cn *conn) Commit() (err error) { _, commandTag, err := cn.simpleExec("COMMIT") if err != nil { if cn.isInTransaction() { - cn.bad = true + cn.setBad() } return err } if commandTag != "COMMIT" { - cn.bad = true + cn.setBad() return fmt.Errorf("unexpected command tag %s", commandTag) } cn.checkIsInTransaction(false) @@ -486,16 +599,20 @@ func (cn *conn) Commit() (err error) { } func (cn *conn) Rollback() (err error) { - if cn.bad { + defer cn.closeTxn() + if cn.getBad() { return driver.ErrBadConn } defer cn.errRecover(&err) + return cn.rollback() +} +func (cn *conn) rollback() (err error) { cn.checkIsInTransaction(true) _, commandTag, err := cn.simpleExec("ROLLBACK") if err != nil { if cn.isInTransaction() { - cn.bad = true + cn.setBad() } return err } @@ -523,14 +640,19 @@ func (cn *conn) simpleExec(q string) (res driver.Result, commandTag string, err res, commandTag = cn.parseComplete(r.string()) case 'Z': cn.processReadyForQuery(r) + if res == nil && err == nil { + err = errUnexpectedReady + } // done return case 'E': err = parseError(r) - case 'T', 'D', 'I': + case 'I': + res = emptyRows + case 'T', 'D': // ignore any results default: - cn.bad = true + cn.setBad() errorf("unknown response for simple query: %q", t) } } @@ -539,8 +661,6 @@ func (cn *conn) simpleExec(q string) (res driver.Result, commandTag string, err func (cn *conn) simpleQuery(q string) (res *rows, err error) { defer cn.errRecover(&err) - st := &stmt{cn: cn, name: ""} - b := cn.writeBuf('Q') b.string(q) cn.send(b) @@ -554,16 +674,24 @@ func (cn *conn) simpleQuery(q string) (res *rows, err error) { // the user can close, though, to avoid connections from being // leaked. A "rows" with done=true works fine for that purpose. if err != nil { - cn.bad = true + cn.setBad() errorf("unexpected message %q in simple query execution", t) } - res = &rows{ - cn: cn, - colNames: st.colNames, - colTyps: st.colTyps, - colFmts: st.colFmts, - done: true, + if res == nil { + res = &rows{ + cn: cn, + } + } + // Set the result and tag to the last command complete if there wasn't a + // query already run. Although queries usually return from here and cede + // control to Next, a query with zero results does not. + if t == 'C' { + res.result, res.tag = cn.parseComplete(r.string()) + if res.colNames != nil { + return + } } + res.done = true case 'Z': cn.processReadyForQuery(r) // done @@ -573,7 +701,7 @@ func (cn *conn) simpleQuery(q string) (res *rows, err error) { err = parseError(r) case 'D': if res == nil { - cn.bad = true + cn.setBad() errorf("unexpected DataRow in simple query execution") } // the query didn't fail; kick off to Next @@ -583,20 +711,34 @@ func (cn *conn) simpleQuery(q string) (res *rows, err error) { // res might be non-nil here if we received a previous // CommandComplete, but that's fine; just overwrite it res = &rows{cn: cn} - res.colNames, res.colFmts, res.colTyps = parsePortalRowDescribe(r) + res.rowsHeader = parsePortalRowDescribe(r) // To work around a bug in QueryRow in Go 1.2 and earlier, wait // until the first DataRow has been received. 
default: - cn.bad = true + cn.setBad() errorf("unknown response for simple query: %q", t) } } } +type noRows struct{} + +var emptyRows noRows + +var _ driver.Result = noRows{} + +func (noRows) LastInsertId() (int64, error) { + return 0, errNoLastInsertID +} + +func (noRows) RowsAffected() (int64, error) { + return 0, errNoRowsAffected +} + // Decides which column formats to use for a prepared statement. The input is // an array of type oids, one element per result column. -func decideColumnFormats(colTyps []oid.Oid, forceText bool) (colFmts []format, colFmtData []byte) { +func decideColumnFormats(colTyps []fieldDesc, forceText bool) (colFmts []format, colFmtData []byte) { if len(colTyps) == 0 { return nil, colFmtDataAllText } @@ -608,8 +750,8 @@ func decideColumnFormats(colTyps []oid.Oid, forceText bool) (colFmts []format, c allBinary := true allText := true - for i, o := range colTyps { - switch o { + for i, t := range colTyps { + switch t.OID { // This is the list of types to use binary mode for when receiving them // through a prepared statement. If a type appears in this list, it // must also be implemented in binaryDecode in encode.go. @@ -620,6 +762,8 @@ func decideColumnFormats(colTyps []oid.Oid, forceText bool) (colFmts []format, c case oid.T_int4: fallthrough case oid.T_int2: + fallthrough + case oid.T_uuid: colFmts[i] = formatBinary allText = false @@ -665,38 +809,51 @@ func (cn *conn) prepareTo(q, stmtName string) *stmt { } func (cn *conn) Prepare(q string) (_ driver.Stmt, err error) { - if cn.bad { + if cn.getBad() { return nil, driver.ErrBadConn } defer cn.errRecover(&err) if len(q) >= 4 && strings.EqualFold(q[:4], "COPY") { - return cn.prepareCopyIn(q) + s, err := cn.prepareCopyIn(q) + if err == nil { + cn.inCopy = true + } + return s, err } return cn.prepareTo(q, cn.gname()), nil } func (cn *conn) Close() (err error) { - if cn.bad { - return driver.ErrBadConn - } + // Skip cn.bad return here because we always want to close a connection. defer cn.errRecover(&err) + // Ensure that cn.c.Close is always run. Since error handling is done with + // panics and cn.errRecover, the Close must be in a defer. + defer func() { + cerr := cn.c.Close() + if err == nil { + err = cerr + } + }() + // Don't go through send(); ListenerConn relies on us not scribbling on the // scratch buffer of this connection. 
- err = cn.sendSimpleMessage('X') - if err != nil { - return err - } - - return cn.c.Close() + return cn.sendSimpleMessage('X') } // Implement the "Queryer" interface -func (cn *conn) Query(query string, args []driver.Value) (_ driver.Rows, err error) { - if cn.bad { +func (cn *conn) Query(query string, args []driver.Value) (driver.Rows, error) { + return cn.query(query, args) +} + +func (cn *conn) query(query string, args []driver.Value) (_ *rows, err error) { + if cn.getBad() { return nil, driver.ErrBadConn } + if cn.inCopy { + return nil, errCopyInProgress + } defer cn.errRecover(&err) // Check to see if we can use the "simpleQuery" interface, which is @@ -711,24 +868,21 @@ func (cn *conn) Query(query string, args []driver.Value) (_ driver.Rows, err err cn.readParseResponse() cn.readBindResponse() rows := &rows{cn: cn} - rows.colNames, rows.colFmts, rows.colTyps = cn.readPortalDescribeResponse() + rows.rowsHeader = cn.readPortalDescribeResponse() cn.postExecuteWorkaround() return rows, nil - } else { - st := cn.prepareTo(query, "") - st.exec(args) - return &rows{ - cn: cn, - colNames: st.colNames, - colTyps: st.colTyps, - colFmts: st.colFmts, - }, nil } + st := cn.prepareTo(query, "") + st.exec(args) + return &rows{ + cn: cn, + rowsHeader: st.rowsHeader, + }, nil } // Implement the optional "Execer" interface for one-shot queries func (cn *conn) Exec(query string, args []driver.Value) (res driver.Result, err error) { - if cn.bad { + if cn.getBad() { return nil, driver.ErrBadConn } defer cn.errRecover(&err) @@ -750,38 +904,41 @@ func (cn *conn) Exec(query string, args []driver.Value) (res driver.Result, err cn.postExecuteWorkaround() res, _, err = cn.readExecuteResponse("Execute") return res, err - } else { - // Use the unnamed statement to defer planning until bind - // time, or else value-based selectivity estimates cannot be - // used. - st := cn.prepareTo(query, "") - r, err := st.Exec(args) - if err != nil { - panic(err) - } - return r, err } -} - -func (cn *conn) send(m *writeBuf) { - _, err := cn.c.Write(m.wrap()) + // Use the unnamed statement to defer planning until bind + // time, or else value-based selectivity estimates cannot be + // used. + st := cn.prepareTo(query, "") + r, err := st.Exec(args) if err != nil { panic(err) } + return r, err } -func (cn *conn) sendStartupPacket(m *writeBuf) { - // sanity check - if m.buf[0] != 0 { - panic("oops") - } +type safeRetryError struct { + Err error +} - _, err := cn.c.Write((m.wrap())[1:]) +func (se *safeRetryError) Error() string { + return se.Err.Error() +} + +func (cn *conn) send(m *writeBuf) { + n, err := cn.c.Write(m.wrap()) if err != nil { + if n == 0 { + err = &safeRetryError{Err: err} + } panic(err) } } +func (cn *conn) sendStartupPacket(m *writeBuf) error { + _, err := cn.c.Write((m.wrap())[1:]) + return err +} + // Send a message of type typ to the server on the other end of cn. The // message should have no payload. This method does not use the scratch // buffer. @@ -797,7 +954,7 @@ func (cn *conn) sendSimpleMessage(typ byte) (err error) { // the message yourself. 
func (cn *conn) saveMessage(typ byte, buf *readBuf) { if cn.saveMessageType != 0 { - cn.bad = true + cn.setBad() errorf("unexpected saveMessageType %d", cn.saveMessageType) } cn.saveMessageType = typ @@ -851,12 +1008,17 @@ func (cn *conn) recv() (t byte, r *readBuf) { if err != nil { panic(err) } - switch t { case 'E': panic(parseError(r)) case 'N': - // ignore + if n := cn.noticeHandler; n != nil { + n(parseError(r)) + } + case 'A': + if n := cn.notificationHandler; n != nil { + n(recvNotification(r)) + } default: return } @@ -873,8 +1035,14 @@ func (cn *conn) recv1Buf(r *readBuf) byte { } switch t { - case 'A', 'N': - // ignore + case 'A': + if n := cn.notificationHandler; n != nil { + n(recvNotification(r)) + } + case 'N': + if n := cn.noticeHandler; n != nil { + n(parseError(r)) + } case 'S': cn.processParameterStatus(r) default: @@ -892,150 +1060,35 @@ func (cn *conn) recv1() (t byte, r *readBuf) { return t, r } -func (cn *conn) ssl(o values) { - verifyCaOnly := false - tlsConf := tls.Config{} - switch mode := o.Get("sslmode"); mode { - case "require", "": - tlsConf.InsecureSkipVerify = true - case "verify-ca": - // We must skip TLS's own verification since it requires full - // verification since Go 1.3. - tlsConf.InsecureSkipVerify = true - verifyCaOnly = true - case "verify-full": - tlsConf.ServerName = o.Get("host") - case "disable": - return - default: - errorf(`unsupported sslmode %q; only "require" (default), "verify-full", and "disable" supported`, mode) +func (cn *conn) ssl(o values) error { + upgrade, err := ssl(o) + if err != nil { + return err } - cn.setupSSLClientCertificates(&tlsConf, o) - cn.setupSSLCA(&tlsConf, o) + if upgrade == nil { + // Nothing to do + return nil + } w := cn.writeBuf(0) w.int32(80877103) - cn.sendStartupPacket(w) + if err = cn.sendStartupPacket(w); err != nil { + return err + } b := cn.scratch[:1] - _, err := io.ReadFull(cn.c, b) + _, err = io.ReadFull(cn.c, b) if err != nil { - panic(err) + return err } if b[0] != 'S' { - panic(ErrSSLNotSupported) - } - - client := tls.Client(cn.c, &tlsConf) - if verifyCaOnly { - cn.verifyCA(client, &tlsConf) - } - cn.c = client -} - -// verifyCA carries out a TLS handshake to the server and verifies the -// presented certificate against the effective CA, i.e. the one specified in -// sslrootcert or the system CA if sslrootcert was not specified. -func (cn *conn) verifyCA(client *tls.Conn, tlsConf *tls.Config) { - err := client.Handshake() - if err != nil { - panic(err) - } - certs := client.ConnectionState().PeerCertificates - opts := x509.VerifyOptions{ - DNSName: client.ConnectionState().ServerName, - Intermediates: x509.NewCertPool(), - Roots: tlsConf.RootCAs, - } - for i, cert := range certs { - if i == 0 { - continue - } - opts.Intermediates.AddCert(cert) - } - _, err = certs[0].Verify(opts) - if err != nil { - panic(err) - } -} - -// This function sets up SSL client certificates based on either the "sslkey" -// and "sslcert" settings (possibly set via the environment variables PGSSLKEY -// and PGSSLCERT, respectively), or if they aren't set, from the .postgresql -// directory in the user's home directory. If the file paths are set -// explicitly, the files must exist. The key file must also not be -// world-readable, or this function will panic with -// ErrSSLKeyHasWorldPermissions. 
-func (cn *conn) setupSSLClientCertificates(tlsConf *tls.Config, o values) { - var missingOk bool - - sslkey := o.Get("sslkey") - sslcert := o.Get("sslcert") - if sslkey != "" && sslcert != "" { - // If the user has set an sslkey and sslcert, they *must* exist. - missingOk = false - } else { - // Automatically load certificates from ~/.postgresql. - user, err := user.Current() - if err != nil { - // user.Current() might fail when cross-compiling. We have to - // ignore the error and continue without client certificates, since - // we wouldn't know where to load them from. - return - } - - sslkey = filepath.Join(user.HomeDir, ".postgresql", "postgresql.key") - sslcert = filepath.Join(user.HomeDir, ".postgresql", "postgresql.crt") - missingOk = true - } - - // Check that both files exist, and report the error or stop, depending on - // which behaviour we want. Note that we don't do any more extensive - // checks than this (such as checking that the paths aren't directories); - // LoadX509KeyPair() will take care of the rest. - keyfinfo, err := os.Stat(sslkey) - if err != nil && missingOk { - return - } else if err != nil { - panic(err) - } - _, err = os.Stat(sslcert) - if err != nil && missingOk { - return - } else if err != nil { - panic(err) - } - - // If we got this far, the key file must also have the correct permissions - kmode := keyfinfo.Mode() - if kmode != kmode&0600 { - panic(ErrSSLKeyHasWorldPermissions) - } - - cert, err := tls.LoadX509KeyPair(sslcert, sslkey) - if err != nil { - panic(err) + return ErrSSLNotSupported } - tlsConf.Certificates = []tls.Certificate{cert} -} -// Sets up RootCAs in the TLS configuration if sslrootcert is set. -func (cn *conn) setupSSLCA(tlsConf *tls.Config, o values) { - if sslrootcert := o.Get("sslrootcert"); sslrootcert != "" { - tlsConf.RootCAs = x509.NewCertPool() - - cert, err := ioutil.ReadFile(sslrootcert) - if err != nil { - panic(err) - } - - ok := tlsConf.RootCAs.AppendCertsFromPEM(cert) - if !ok { - errorf("couldn't parse pem in sslrootcert") - } - } + cn.c, err = upgrade(cn.c) + return err } // isDriverSetting returns true iff a setting is purely for configuring the @@ -1057,7 +1110,10 @@ func isDriverSetting(key string) bool { return true case "binary_parameters": return true - + case "krbsrvname": + return true + case "krbspn": + return true default: return false } @@ -1084,12 +1140,15 @@ func (cn *conn) startup(o values) { w.string(v) } w.string("") - cn.sendStartupPacket(w) + if err := cn.sendStartupPacket(w); err != nil { + panic(err) + } for { t, r := cn.recv() switch t { case 'K': + cn.processBackendKeyData(r) case 'S': cn.processParameterStatus(r) case 'R': @@ -1109,7 +1168,7 @@ func (cn *conn) auth(r *readBuf, o values) { // OK case 3: w := cn.writeBuf('p') - w.string(o.Get("password")) + w.string(o["password"]) cn.send(w) t, r := cn.recv() @@ -1123,7 +1182,7 @@ func (cn *conn) auth(r *readBuf, o values) { case 5: s := string(r.next(4)) w := cn.writeBuf('p') - w.string("md5" + md5s(md5s(o.Get("password")+o.Get("user"))+s)) + w.string("md5" + md5s(md5s(o["password"]+o["user"])+s)) cn.send(w) t, r := cn.recv() @@ -1134,6 +1193,108 @@ func (cn *conn) auth(r *readBuf, o values) { if r.int32() != 0 { errorf("unexpected authentication response: %q", t) } + case 7: // GSSAPI, startup + if newGss == nil { + errorf("kerberos error: no GSSAPI provider registered (import github.com/lib/pq/auth/kerberos if you need Kerberos support)") + } + cli, err := newGss() + if err != nil { + errorf("kerberos error: %s", err.Error()) + } + + var token 
[]byte + + if spn, ok := o["krbspn"]; ok { + // Use the supplied SPN if provided.. + token, err = cli.GetInitTokenFromSpn(spn) + } else { + // Allow the kerberos service name to be overridden + service := "postgres" + if val, ok := o["krbsrvname"]; ok { + service = val + } + + token, err = cli.GetInitToken(o["host"], service) + } + + if err != nil { + errorf("failed to get Kerberos ticket: %q", err) + } + + w := cn.writeBuf('p') + w.bytes(token) + cn.send(w) + + // Store for GSSAPI continue message + cn.gss = cli + + case 8: // GSSAPI continue + + if cn.gss == nil { + errorf("GSSAPI protocol error") + } + + b := []byte(*r) + + done, tokOut, err := cn.gss.Continue(b) + if err == nil && !done { + w := cn.writeBuf('p') + w.bytes(tokOut) + cn.send(w) + } + + // Errors fall through and read the more detailed message + // from the server.. + + case 10: + sc := scram.NewClient(sha256.New, o["user"], o["password"]) + sc.Step(nil) + if sc.Err() != nil { + errorf("SCRAM-SHA-256 error: %s", sc.Err().Error()) + } + scOut := sc.Out() + + w := cn.writeBuf('p') + w.string("SCRAM-SHA-256") + w.int32(len(scOut)) + w.bytes(scOut) + cn.send(w) + + t, r := cn.recv() + if t != 'R' { + errorf("unexpected password response: %q", t) + } + + if r.int32() != 11 { + errorf("unexpected authentication response: %q", t) + } + + nextStep := r.next(len(*r)) + sc.Step(nextStep) + if sc.Err() != nil { + errorf("SCRAM-SHA-256 error: %s", sc.Err().Error()) + } + + scOut = sc.Out() + w = cn.writeBuf('p') + w.bytes(scOut) + cn.send(w) + + t, r = cn.recv() + if t != 'R' { + errorf("unexpected password response: %q", t) + } + + if r.int32() != 12 { + errorf("unexpected authentication response: %q", t) + } + + nextStep = r.next(len(*r)) + sc.Step(nextStep) + if sc.Err() != nil { + errorf("SCRAM-SHA-256 error: %s", sc.Err().Error()) + } + default: errorf("unknown authentication response: %d", code) } @@ -1145,18 +1306,16 @@ const formatText format = 0 const formatBinary format = 1 // One result-column format code with the value 1 (i.e. all binary). -var colFmtDataAllBinary []byte = []byte{0, 1, 0, 1} +var colFmtDataAllBinary = []byte{0, 1, 0, 1} // No result-column format codes (i.e. all text). 
-var colFmtDataAllText []byte = []byte{0, 0} +var colFmtDataAllText = []byte{0, 0} type stmt struct { - cn *conn - name string - colNames []string - colFmts []format + cn *conn + name string + rowsHeader colFmtData []byte - colTyps []oid.Oid paramTyps []oid.Oid closed bool } @@ -1165,7 +1324,7 @@ func (st *stmt) Close() (err error) { if st.closed { return nil } - if st.cn.bad { + if st.cn.getBad() { return driver.ErrBadConn } defer st.cn.errRecover(&err) @@ -1179,14 +1338,14 @@ func (st *stmt) Close() (err error) { t, _ := st.cn.recv1() if t != '3' { - st.cn.bad = true + st.cn.setBad() errorf("unexpected close response: %q", t) } st.closed = true t, r := st.cn.recv1() if t != 'Z' { - st.cn.bad = true + st.cn.setBad() errorf("expected ready for query, but got: %q", t) } st.cn.processReadyForQuery(r) @@ -1195,22 +1354,20 @@ func (st *stmt) Close() (err error) { } func (st *stmt) Query(v []driver.Value) (r driver.Rows, err error) { - if st.cn.bad { + if st.cn.getBad() { return nil, driver.ErrBadConn } defer st.cn.errRecover(&err) st.exec(v) return &rows{ - cn: st.cn, - colNames: st.colNames, - colTyps: st.colTyps, - colFmts: st.colFmts, + cn: st.cn, + rowsHeader: st.rowsHeader, }, nil } func (st *stmt) Exec(v []driver.Value) (res driver.Result, err error) { - if st.cn.bad { + if st.cn.getBad() { return nil, driver.ErrBadConn } defer st.cn.errRecover(&err) @@ -1297,7 +1454,7 @@ func (cn *conn) parseComplete(commandTag string) (driver.Result, string) { if affectedRows == nil && strings.HasPrefix(commandTag, "INSERT ") { parts := strings.Split(commandTag, " ") if len(parts) != 3 { - cn.bad = true + cn.setBad() errorf("unexpected INSERT command tag %s", commandTag) } affectedRows = &parts[len(parts)-1] @@ -1309,29 +1466,46 @@ func (cn *conn) parseComplete(commandTag string) (driver.Result, string) { } n, err := strconv.ParseInt(*affectedRows, 10, 64) if err != nil { - cn.bad = true + cn.setBad() errorf("could not parse commandTag: %s", err) } return driver.RowsAffected(n), commandTag } -type rows struct { - cn *conn +type rowsHeader struct { colNames []string - colTyps []oid.Oid + colTyps []fieldDesc colFmts []format - done bool - rb readBuf +} + +type rows struct { + cn *conn + finish func() + rowsHeader + done bool + rb readBuf + result driver.Result + tag string + + next *rowsHeader } func (rs *rows) Close() error { + if finish := rs.finish; finish != nil { + defer finish() + } // no need to look at cn.bad as Next() will for { err := rs.Next(nil) switch err { case nil: case io.EOF: - return nil + // rs.Next can return io.EOF on both 'Z' (ready for query) and 'T' (row + // description, used with HasNextResultSet). We need to fetch messages until + // we hit a 'Z', which is done by waiting for done to be set. 
+ if rs.done { + return nil + } default: return err } @@ -1342,13 +1516,24 @@ func (rs *rows) Columns() []string { return rs.colNames } +func (rs *rows) Result() driver.Result { + if rs.result == nil { + return emptyRows + } + return rs.result +} + +func (rs *rows) Tag() string { + return rs.tag +} + func (rs *rows) Next(dest []driver.Value) (err error) { if rs.done { return io.EOF } conn := rs.cn - if conn.bad { + if conn.getBad() { return driver.ErrBadConn } defer conn.errRecover(&err) @@ -1359,6 +1544,9 @@ func (rs *rows) Next(dest []driver.Value) (err error) { case 'E': err = parseError(&rs.rb) case 'C', 'I': + if t == 'C' { + rs.result, rs.tag = conn.parseComplete(rs.rb.string()) + } continue case 'Z': conn.processReadyForQuery(&rs.rb) @@ -1370,7 +1558,7 @@ func (rs *rows) Next(dest []driver.Value) (err error) { case 'D': n := rs.rb.int16() if err != nil { - conn.bad = true + conn.setBad() errorf("unexpected DataRow after error %s", err) } if n < len(dest) { @@ -1382,21 +1570,40 @@ func (rs *rows) Next(dest []driver.Value) (err error) { dest[i] = nil continue } - dest[i] = decode(&conn.parameterStatus, rs.rb.next(l), rs.colTyps[i], rs.colFmts[i]) + dest[i] = decode(&conn.parameterStatus, rs.rb.next(l), rs.colTyps[i].OID, rs.colFmts[i]) } return + case 'T': + next := parsePortalRowDescribe(&rs.rb) + rs.next = &next + return io.EOF default: errorf("unexpected message after execute: %q", t) } } } +func (rs *rows) HasNextResultSet() bool { + hasNext := rs.next != nil && !rs.done + return hasNext +} + +func (rs *rows) NextResultSet() error { + if rs.next == nil { + return io.EOF + } + rs.rowsHeader = *rs.next + rs.next = nil + return nil +} + // QuoteIdentifier quotes an "identifier" (e.g. a table or a column name) to be // used as part of an SQL statement. For example: // // tblname := "my_table" // data := "my_data" -// err = db.Exec(fmt.Sprintf("INSERT INTO %s VALUES ($1)", pq.QuoteIdentifier(tblname)), data) +// quoted := pq.QuoteIdentifier(tblname) +// err := db.Exec(fmt.Sprintf("INSERT INTO %s VALUES ($1)", quoted), data) // // Any double quotes in name will be escaped. The quoted identifier will be // case sensitive when used in a query. If the input string contains a zero @@ -1409,6 +1616,39 @@ func QuoteIdentifier(name string) string { return `"` + strings.Replace(name, `"`, `""`, -1) + `"` } +// QuoteLiteral quotes a 'literal' (e.g. a parameter, often used to pass literal +// to DDL and other statements that do not accept parameters) to be used as part +// of an SQL statement. For example: +// +// exp_date := pq.QuoteLiteral("2023-01-05 15:00:00Z") +// err := db.Exec(fmt.Sprintf("CREATE ROLE my_user VALID UNTIL %s", exp_date)) +// +// Any single quotes in name will be escaped. Any backslashes (i.e. "\") will be +// replaced by two backslashes (i.e. "\\") and the C-style escape identifier +// that PostgreSQL provides ('E') will be prepended to the string. +func QuoteLiteral(literal string) string { + // This follows the PostgreSQL internal algorithm for handling quoted literals + // from libpq, which can be found in the "PQEscapeStringInternal" function, + // which is found in the libpq/fe-exec.c source file: + // https://git.postgresql.org/gitweb/?p=postgresql.git;a=blob;f=src/interfaces/libpq/fe-exec.c + // + // substitute any single-quotes (') with two single-quotes ('') + literal = strings.Replace(literal, `'`, `''`, -1) + // determine if the string has any backslashes (\) in it. 
+ // if it does, replace any backslashes (\) with two backslashes (\\) + // then, we need to wrap the entire string with a PostgreSQL + // C-style escape. Per how "PQEscapeStringInternal" handles this case, we + // also add a space before the "E" + if strings.Contains(literal, `\`) { + literal = strings.Replace(literal, `\`, `\\`, -1) + literal = ` E'` + literal + `'` + } else { + // otherwise, we can just wrap the literal with a pair of single quotes + literal = `'` + literal + `'` + } + return literal +} + func md5s(s string) string { h := md5.New() h.Write([]byte(s)) @@ -1477,7 +1717,7 @@ func (cn *conn) sendBinaryModeQuery(query string, args []driver.Value) { cn.send(b) } -func (c *conn) processParameterStatus(r *readBuf) { +func (cn *conn) processParameterStatus(r *readBuf) { var err error param := r.string() @@ -1488,13 +1728,13 @@ func (c *conn) processParameterStatus(r *readBuf) { var minor int _, err = fmt.Sscanf(r.string(), "%d.%d.%d", &major1, &major2, &minor) if err == nil { - c.parameterStatus.serverVersion = major1*10000 + major2*100 + minor + cn.parameterStatus.serverVersion = major1*10000 + major2*100 + minor } case "TimeZone": - c.parameterStatus.currentLocation, err = time.LoadLocation(r.string()) + cn.parameterStatus.currentLocation, err = time.LoadLocation(r.string()) if err != nil { - c.parameterStatus.currentLocation = nil + cn.parameterStatus.currentLocation = nil } default: @@ -1502,8 +1742,8 @@ func (c *conn) processParameterStatus(r *readBuf) { } } -func (c *conn) processReadyForQuery(r *readBuf) { - c.txnStatus = transactionStatus(r.byte()) +func (cn *conn) processReadyForQuery(r *readBuf) { + cn.txnStatus = transactionStatus(r.byte()) } func (cn *conn) readReadyForQuery() { @@ -1513,11 +1753,16 @@ func (cn *conn) readReadyForQuery() { cn.processReadyForQuery(r) return default: - cn.bad = true + cn.setBad() errorf("unexpected message %q; expected ReadyForQuery", t) } } +func (cn *conn) processBackendKeyData(r *readBuf) { + cn.processID = r.int32() + cn.secretKey = r.int32() +} + func (cn *conn) readParseResponse() { t, r := cn.recv1() switch t { @@ -1528,12 +1773,12 @@ func (cn *conn) readParseResponse() { cn.readReadyForQuery() panic(err) default: - cn.bad = true + cn.setBad() errorf("unexpected Parse response %q", t) } } -func (cn *conn) readStatementDescribeResponse() (paramTyps []oid.Oid, colNames []string, colTyps []oid.Oid) { +func (cn *conn) readStatementDescribeResponse() (paramTyps []oid.Oid, colNames []string, colTyps []fieldDesc) { for { t, r := cn.recv1() switch t { @@ -1553,25 +1798,25 @@ func (cn *conn) readStatementDescribeResponse() (paramTyps []oid.Oid, colNames [ cn.readReadyForQuery() panic(err) default: - cn.bad = true + cn.setBad() errorf("unexpected Describe statement response %q", t) } } } -func (cn *conn) readPortalDescribeResponse() (colNames []string, colFmts []format, colTyps []oid.Oid) { +func (cn *conn) readPortalDescribeResponse() rowsHeader { t, r := cn.recv1() switch t { case 'T': return parsePortalRowDescribe(r) case 'n': - return nil, nil, nil + return rowsHeader{} case 'E': err := parseError(r) cn.readReadyForQuery() panic(err) default: - cn.bad = true + cn.setBad() errorf("unexpected Describe response %q", t) } panic("not reached") @@ -1587,7 +1832,7 @@ func (cn *conn) readBindResponse() { cn.readReadyForQuery() panic(err) default: - cn.bad = true + cn.setBad() errorf("unexpected Bind response %q", t) } } @@ -1614,7 +1859,7 @@ func (cn *conn) postExecuteWorkaround() { cn.saveMessage(t, r) return default: - cn.bad = true + 
cn.setBad() errorf("unexpected message during extended query execution: %q", t) } } @@ -1626,49 +1871,69 @@ func (cn *conn) readExecuteResponse(protocolState string) (res driver.Result, co t, r := cn.recv1() switch t { case 'C': + if err != nil { + cn.setBad() + errorf("unexpected CommandComplete after error %s", err) + } res, commandTag = cn.parseComplete(r.string()) case 'Z': cn.processReadyForQuery(r) + if res == nil && err == nil { + err = errUnexpectedReady + } return res, commandTag, err case 'E': err = parseError(r) case 'T', 'D', 'I': + if err != nil { + cn.setBad() + errorf("unexpected %q after error %s", t, err) + } + if t == 'I' { + res = emptyRows + } // ignore any results default: - cn.bad = true + cn.setBad() errorf("unknown %s response: %q", protocolState, t) } } } -func parseStatementRowDescribe(r *readBuf) (colNames []string, colTyps []oid.Oid) { +func parseStatementRowDescribe(r *readBuf) (colNames []string, colTyps []fieldDesc) { n := r.int16() colNames = make([]string, n) - colTyps = make([]oid.Oid, n) + colTyps = make([]fieldDesc, n) for i := range colNames { colNames[i] = r.string() r.next(6) - colTyps[i] = r.oid() - r.next(6) + colTyps[i].OID = r.oid() + colTyps[i].Len = r.int16() + colTyps[i].Mod = r.int32() // format code not known when describing a statement; always 0 r.next(2) } return } -func parsePortalRowDescribe(r *readBuf) (colNames []string, colFmts []format, colTyps []oid.Oid) { +func parsePortalRowDescribe(r *readBuf) rowsHeader { n := r.int16() - colNames = make([]string, n) - colFmts = make([]format, n) - colTyps = make([]oid.Oid, n) + colNames := make([]string, n) + colFmts := make([]format, n) + colTyps := make([]fieldDesc, n) for i := range colNames { colNames[i] = r.string() r.next(6) - colTyps[i] = r.oid() - r.next(6) + colTyps[i].OID = r.oid() + colTyps[i].Len = r.int16() + colTyps[i].Mod = r.int32() colFmts[i] = format(r.int16()) } - return + return rowsHeader{ + colNames: colNames, + colFmts: colFmts, + colTyps: colTyps, + } } // parseEnviron tries to mimic some of libpq's environment handling @@ -1711,7 +1976,7 @@ func parseEnviron(env []string) (out map[string]string) { accrue("user") case "PGPASSWORD": accrue("password") - case "PGPASSFILE", "PGSERVICE", "PGSERVICEFILE", "PGREALM": + case "PGSERVICE", "PGSERVICEFILE", "PGREALM": unsupported() case "PGOPTIONS": accrue("options") diff --git a/vendor/github.com/lib/pq/conn_go18.go b/vendor/github.com/lib/pq/conn_go18.go new file mode 100644 index 000000000..8cab67c9d --- /dev/null +++ b/vendor/github.com/lib/pq/conn_go18.go @@ -0,0 +1,165 @@ +package pq + +import ( + "context" + "database/sql" + "database/sql/driver" + "fmt" + "io" + "io/ioutil" + "sync/atomic" + "time" +) + +// Implement the "QueryerContext" interface +func (cn *conn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) { + list := make([]driver.Value, len(args)) + for i, nv := range args { + list[i] = nv.Value + } + finish := cn.watchCancel(ctx) + r, err := cn.query(query, list) + if err != nil { + if finish != nil { + finish() + } + return nil, err + } + r.finish = finish + return r, nil +} + +// Implement the "ExecerContext" interface +func (cn *conn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) { + list := make([]driver.Value, len(args)) + for i, nv := range args { + list[i] = nv.Value + } + + if finish := cn.watchCancel(ctx); finish != nil { + defer finish() + } + + return cn.Exec(query, list) +} + +// Implement the "ConnBeginTx" 
interface +func (cn *conn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) { + var mode string + + switch sql.IsolationLevel(opts.Isolation) { + case sql.LevelDefault: + // Don't touch mode: use the server's default + case sql.LevelReadUncommitted: + mode = " ISOLATION LEVEL READ UNCOMMITTED" + case sql.LevelReadCommitted: + mode = " ISOLATION LEVEL READ COMMITTED" + case sql.LevelRepeatableRead: + mode = " ISOLATION LEVEL REPEATABLE READ" + case sql.LevelSerializable: + mode = " ISOLATION LEVEL SERIALIZABLE" + default: + return nil, fmt.Errorf("pq: isolation level not supported: %d", opts.Isolation) + } + + if opts.ReadOnly { + mode += " READ ONLY" + } else { + mode += " READ WRITE" + } + + tx, err := cn.begin(mode) + if err != nil { + return nil, err + } + cn.txnFinish = cn.watchCancel(ctx) + return tx, nil +} + +func (cn *conn) Ping(ctx context.Context) error { + if finish := cn.watchCancel(ctx); finish != nil { + defer finish() + } + rows, err := cn.simpleQuery(";") + if err != nil { + return driver.ErrBadConn // https://golang.org/pkg/database/sql/driver/#Pinger + } + rows.Close() + return nil +} + +func (cn *conn) watchCancel(ctx context.Context) func() { + if done := ctx.Done(); done != nil { + finished := make(chan struct{}, 1) + go func() { + select { + case <-done: + select { + case finished <- struct{}{}: + default: + // We raced with the finish func, let the next query handle this with the + // context. + return + } + + // Set the connection state to bad so it does not get reused. + cn.setBad() + + // At this point the function level context is canceled, + // so it must not be used for the additional network + // request to cancel the query. + // Create a new context to pass into the dial. + ctxCancel, cancel := context.WithTimeout(context.Background(), time.Second*10) + defer cancel() + + _ = cn.cancel(ctxCancel) + case <-finished: + } + }() + return func() { + select { + case <-finished: + cn.setBad() + cn.Close() + case finished <- struct{}{}: + } + } + } + return nil +} + +func (cn *conn) cancel(ctx context.Context) error { + c, err := dial(ctx, cn.dialer, cn.opts) + if err != nil { + return err + } + defer c.Close() + + { + bad := &atomic.Value{} + bad.Store(false) + can := conn{ + c: c, + bad: bad, + } + err = can.ssl(cn.opts) + if err != nil { + return err + } + + w := can.writeBuf(0) + w.int32(80877102) // cancel request code + w.int32(cn.processID) + w.int32(cn.secretKey) + + if err := can.sendStartupPacket(w); err != nil { + return err + } + } + + // Read until EOF to ensure that the server received the cancel. + { + _, err := io.Copy(ioutil.Discard, c) + return err + } +} diff --git a/vendor/github.com/lib/pq/connector.go b/vendor/github.com/lib/pq/connector.go new file mode 100644 index 000000000..d7d472615 --- /dev/null +++ b/vendor/github.com/lib/pq/connector.go @@ -0,0 +1,115 @@ +package pq + +import ( + "context" + "database/sql/driver" + "errors" + "fmt" + "os" + "strings" +) + +// Connector represents a fixed configuration for the pq driver with a given +// name. Connector satisfies the database/sql/driver Connector interface and +// can be used to create any number of DB Conn's via the database/sql OpenDB +// function. +// +// See https://golang.org/pkg/database/sql/driver/#Connector. +// See https://golang.org/pkg/database/sql/#OpenDB. +type Connector struct { + opts values + dialer Dialer +} + +// Connect returns a connection to the database using the fixed configuration +// of this Connector. Context is not used. 
+func (c *Connector) Connect(ctx context.Context) (driver.Conn, error) { + return c.open(ctx) +} + +// Driver returns the underlying driver of this Connector. +func (c *Connector) Driver() driver.Driver { + return &Driver{} +} + +// NewConnector returns a connector for the pq driver in a fixed configuration +// with the given dsn. The returned connector can be used to create any number +// of equivalent Conn's. The returned connector is intended to be used with +// database/sql.OpenDB. +// +// See https://golang.org/pkg/database/sql/driver/#Connector. +// See https://golang.org/pkg/database/sql/#OpenDB. +func NewConnector(dsn string) (*Connector, error) { + var err error + o := make(values) + + // A number of defaults are applied here, in this order: + // + // * Very low precedence defaults applied in every situation + // * Environment variables + // * Explicitly passed connection information + o["host"] = "localhost" + o["port"] = "5432" + // N.B.: Extra float digits should be set to 3, but that breaks + // Postgres 8.4 and older, where the max is 2. + o["extra_float_digits"] = "2" + for k, v := range parseEnviron(os.Environ()) { + o[k] = v + } + + if strings.HasPrefix(dsn, "postgres://") || strings.HasPrefix(dsn, "postgresql://") { + dsn, err = ParseURL(dsn) + if err != nil { + return nil, err + } + } + + if err := parseOpts(dsn, o); err != nil { + return nil, err + } + + // Use the "fallback" application name if necessary + if fallback, ok := o["fallback_application_name"]; ok { + if _, ok := o["application_name"]; !ok { + o["application_name"] = fallback + } + } + + // We can't work with any client_encoding other than UTF-8 currently. + // However, we have historically allowed the user to set it to UTF-8 + // explicitly, and there's no reason to break such programs, so allow that. + // Note that the "options" setting could also set client_encoding, but + // parsing its value is not worth it. Instead, we always explicitly send + // client_encoding as a separate run-time parameter, which should override + // anything set in options. + if enc, ok := o["client_encoding"]; ok && !isUTF8(enc) { + return nil, errors.New("client_encoding must be absent or 'UTF8'") + } + o["client_encoding"] = "UTF8" + // DateStyle needs a similar treatment. + if datestyle, ok := o["datestyle"]; ok { + if datestyle != "ISO, MDY" { + return nil, fmt.Errorf("setting datestyle must be absent or %v; got %v", "ISO, MDY", datestyle) + } + } else { + o["datestyle"] = "ISO, MDY" + } + + // If a user is not provided by any other means, the last + // resort is to use the current operating system provided user + // name. 
+ if _, ok := o["user"]; !ok { + u, err := userCurrent() + if err != nil { + return nil, err + } + o["user"] = u + } + + // SSL is not necessary or supported over UNIX domain sockets + if network, _ := network(o); network == "unix" { + o["sslmode"] = "disable" + } + + return &Connector{opts: o, dialer: defaultDialer{}}, nil +} diff --git a/vendor/github.com/lib/pq/copy.go b/vendor/github.com/lib/pq/copy.go index e44fa48a5..bb3cbd7b9 100644 --- a/vendor/github.com/lib/pq/copy.go +++ b/vendor/github.com/lib/pq/copy.go @@ -13,6 +13,7 @@ var ( errBinaryCopyNotSupported = errors.New("pq: only text format supported for COPY") errCopyToNotSupported = errors.New("pq: COPY TO is not supported") errCopyNotSupportedOutsideTxn = errors.New("pq: COPY is only allowed inside a transaction") + errCopyInProgress = errors.New("pq: COPY in progress") ) // CopyIn creates a COPY FROM statement which can be prepared with @@ -48,6 +49,7 @@ type copyin struct { buffer []byte rowData chan []byte done chan bool + driver.Result closed bool @@ -96,13 +98,13 @@ awaitCopyInResponse: err = parseError(r) case 'Z': if err == nil { - cn.bad = true + ci.setBad() errorf("unexpected ReadyForQuery in response to COPY") } cn.processReadyForQuery(r) return nil, err default: - cn.bad = true + ci.setBad() errorf("unknown response for copy query: %q", t) } } @@ -121,7 +123,7 @@ awaitCopyInResponse: cn.processReadyForQuery(r) return nil, err default: - cn.bad = true + ci.setBad() errorf("unknown response for CopyFail: %q", t) } } @@ -142,7 +144,7 @@ func (ci *copyin) resploop() { var r readBuf t, err := ci.cn.recvMessage(&r) if err != nil { - ci.cn.bad = true + ci.setBad() ci.setError(err) ci.done <- true return @@ -150,8 +152,12 @@ func (ci *copyin) resploop() { switch t { case 'C': // complete + res, _ := ci.cn.parseComplete(r.string()) + ci.setResult(res) case 'N': - // NoticeResponse + if n := ci.cn.noticeHandler; n != nil { + n(parseError(&r)) + } case 'Z': ci.cn.processReadyForQuery(&r) ci.done <- true @@ -160,7 +166,7 @@ func (ci *copyin) resploop() { err := parseError(&r) ci.setError(err) default: - ci.cn.bad = true + ci.setBad() ci.setError(fmt.Errorf("unknown response during CopyIn: %q", t)) ci.done <- true return @@ -168,6 +174,19 @@ func (ci *copyin) resploop() { } } +func (ci *copyin) setBad() { + ci.Lock() + ci.cn.setBad() + ci.Unlock() +} + +func (ci *copyin) isBad() bool { + ci.Lock() + b := ci.cn.getBad() + ci.Unlock() + return b +} + func (ci *copyin) isErrorSet() bool { ci.Lock() isSet := (ci.err != nil) @@ -185,6 +204,22 @@ func (ci *copyin) setError(err error) { ci.Unlock() } +func (ci *copyin) setResult(result driver.Result) { + ci.Lock() + ci.Result = result + ci.Unlock() +} + +func (ci *copyin) getResult() driver.Result { + ci.Lock() + result := ci.Result + ci.Unlock() + if result == nil { + return driver.RowsAffected(0) + } + return result +} + func (ci *copyin) NumInput() int { return -1 } @@ -205,7 +240,7 @@ func (ci *copyin) Exec(v []driver.Value) (r driver.Result, err error) { return nil, errCopyInClosed } - if ci.cn.bad { + if ci.isBad() { return nil, driver.ErrBadConn } defer ci.cn.errRecover(&err) @@ -215,9 +250,11 @@ func (ci *copyin) Exec(v []driver.Value) (r driver.Result, err error) { } if len(v) == 0 { - err = ci.Close() - ci.closed = true - return nil, err + if err := ci.Close(); err != nil { + return driver.RowsAffected(0), err + } + + return ci.getResult(), nil } numValues := len(v) @@ -240,11 +277,12 @@ func (ci *copyin) Exec(v []driver.Value) (r driver.Result, err error) { } func (ci 
*copyin) Close() (err error) { - if ci.closed { - return errCopyInClosed + if ci.closed { // Don't do anything, we're already closed + return nil } + ci.closed = true - if ci.cn.bad { + if ci.isBad() { return driver.ErrBadConn } defer ci.cn.errRecover(&err) @@ -259,6 +297,7 @@ func (ci *copyin) Close() (err error) { } <-ci.done + ci.cn.inCopy = false if ci.isErrorSet() { err = ci.err diff --git a/vendor/github.com/lib/pq/doc.go b/vendor/github.com/lib/pq/doc.go index f772117d0..b57184801 100644 --- a/vendor/github.com/lib/pq/doc.go +++ b/vendor/github.com/lib/pq/doc.go @@ -11,7 +11,8 @@ using this package directly. For example: ) func main() { - db, err := sql.Open("postgres", "user=pqgotest dbname=pqgotest sslmode=verify-full") + connStr := "user=pqgotest dbname=pqgotest sslmode=verify-full" + db, err := sql.Open("postgres", connStr) if err != nil { log.Fatal(err) } @@ -23,7 +24,8 @@ using this package directly. For example: You can also connect to a database using a URL. For example: - db, err := sql.Open("postgres", "postgres://pqgotest:password@localhost/pqgotest?sslmode=verify-full") + connStr := "postgres://pqgotest:password@localhost/pqgotest?sslmode=verify-full" + db, err := sql.Open("postgres", connStr) Connection String Parameters @@ -43,21 +45,28 @@ supported: * dbname - The name of the database to connect to * user - The user to sign in as * password - The user's password - * host - The host to connect to. Values that start with / are for unix domain sockets. (default is localhost) + * host - The host to connect to. Values that start with / are for unix + domain sockets. (default is localhost) * port - The port to bind to. (default is 5432) - * sslmode - Whether or not to use SSL (default is require, this is not the default for libpq) + * sslmode - Whether or not to use SSL (default is require, this is not + the default for libpq) * fallback_application_name - An application_name to fall back to if one isn't provided. - * connect_timeout - Maximum wait for connection, in seconds. Zero or not specified means wait indefinitely. + * connect_timeout - Maximum wait for connection, in seconds. Zero or + not specified means wait indefinitely. * sslcert - Cert file location. The file must contain PEM encoded data. * sslkey - Key file location. The file must contain PEM encoded data. - * sslrootcert - The location of the root certificate file. The file must contain PEM encoded data. + * sslrootcert - The location of the root certificate file. The file + must contain PEM encoded data. Valid values for sslmode are: * disable - No SSL * require - Always SSL (skip verification) - * verify-ca - Always SSL (verify that the certificate presented by the server was signed by a trusted CA) - * verify-full - Always SSL (verify that the certification presented by the server was signed by a trusted CA and the server host name matches the one in the certificate) + * verify-ca - Always SSL (verify that the certificate presented by the + server was signed by a trusted CA) + * verify-full - Always SSL (verify that the certification presented by + the server was signed by a trusted CA and the server host name + matches the one in the certificate) See http://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-CONNSTRING for more information about connection string parameters. 
@@ -68,7 +77,7 @@ Use single quotes for values that contain whitespace: A backslash will escape the next character in values: - "user=space\ man password='it\'s valid' + "user=space\ man password='it\'s valid'" Note that the connection parameter client_encoding (which sets the text encoding for the connection) may be set but must be "UTF8", @@ -86,9 +95,13 @@ variables not supported by pq are set, pq will panic during connection establishment. Environment variables have a lower precedence than explicitly provided connection parameters. +The pgpass mechanism as described in http://www.postgresql.org/docs/current/static/libpq-pgpass.html +is supported, but on Windows PGPASSFILE must be specified explicitly. + Queries + database/sql does not dictate any specific format for parameter markers in query strings, and pq uses the Postgres-native ordinal markers, as shown above. The same marker can be reused for the same parameter: @@ -112,8 +125,30 @@ For more details on RETURNING, see the Postgres documentation: For additional instructions on querying see the documentation for the database/sql package. + +Data Types + + +Parameters pass through driver.DefaultParameterConverter before they are handled +by this package. When the binary_parameters connection option is enabled, +[]byte values are sent directly to the backend as data in binary format. + +This package returns the following types for values from the PostgreSQL backend: + + - integer types smallint, integer, and bigint are returned as int64 + - floating-point types real and double precision are returned as float64 + - character types char, varchar, and text are returned as string + - temporal types date, time, timetz, timestamp, and timestamptz are + returned as time.Time + - the boolean type is returned as bool + - the bytea type is returned as []byte + +All other types are returned directly from the backend as []byte values in text format. + + Errors + pq may return errors of type *pq.Error which can be interrogated for error details: if err, ok := err.(*pq.Error); ok { @@ -204,7 +239,30 @@ for more information). Note that the channel name will be truncated to 63 bytes by the PostgreSQL server. You can find a complete, working example of Listener usage at -http://godoc.org/github.com/lib/pq/listen_example. +https://godoc.org/github.com/lib/pq/example/listen. + + +Kerberos Support + + +If you need support for Kerberos authentication, add the following to your main +package: + + import "github.com/lib/pq/auth/kerberos" + + func init() { + pq.RegisterGSSProvider(func() (pq.Gss, error) { return kerberos.NewGSS() }) + } + +This package is in a separate module so that users who don't need Kerberos +don't have to download unnecessary dependencies. + +When imported, additional connection string parameters are supported: + * krbsrvname - GSS (Kerberos) service name when constructing the + SPN (default is `postgres`). This will be combined with the host + to form the full SPN: `krbsrvname/host`. + * krbspn - GSS (Kerberos) SPN. This takes priority over + `krbsrvname` if present. 
*/ package pq diff --git a/vendor/github.com/lib/pq/encode.go b/vendor/github.com/lib/pq/encode.go index 88422eb3d..c4dafe270 100644 --- a/vendor/github.com/lib/pq/encode.go +++ b/vendor/github.com/lib/pq/encode.go @@ -5,8 +5,10 @@ import ( "database/sql/driver" "encoding/binary" "encoding/hex" + "errors" "fmt" "math" + "regexp" "strconv" "strings" "sync" @@ -15,6 +17,8 @@ import ( "github.com/lib/pq/oid" ) +var time2400Regex = regexp.MustCompile(`^(24:00(?::00(?:\.0+)?)?)(?:[Z+-].*)?$`) + func binaryEncode(parameterStatus *parameterStatus, x interface{}) []byte { switch v := x.(type) { case []byte: @@ -22,7 +26,6 @@ func binaryEncode(parameterStatus *parameterStatus, x interface{}) []byte { default: return encode(parameterStatus, x, oid.T_unknown) } - panic("not reached") } func encode(parameterStatus *parameterStatus, x interface{}, pgtypOid oid.Oid) []byte { @@ -56,10 +59,13 @@ func encode(parameterStatus *parameterStatus, x interface{}, pgtypOid oid.Oid) [ } func decode(parameterStatus *parameterStatus, s []byte, typ oid.Oid, f format) interface{} { - if f == formatBinary { + switch f { + case formatBinary: return binaryDecode(parameterStatus, s, typ) - } else { + case formatText: return textDecode(parameterStatus, s, typ) + default: + panic("not reached") } } @@ -73,9 +79,15 @@ func binaryDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) inter return int64(int32(binary.BigEndian.Uint32(s))) case oid.T_int2: return int64(int16(binary.BigEndian.Uint16(s))) + case oid.T_uuid: + b, err := decodeUUIDBinary(s) + if err != nil { + panic(err) + } + return b default: - errorf("don't know how to decode binary parameter of type %u", uint32(typ)) + errorf("don't know how to decode binary parameter of type %d", uint32(typ)) } panic("not reached") @@ -83,8 +95,14 @@ func binaryDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) inter func textDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{} { switch typ { + case oid.T_char, oid.T_varchar, oid.T_text: + return string(s) case oid.T_bytea: - return parseBytea(s) + b, err := parseBytea(s) + if err != nil { + errorf("%s", err) + } + return b case oid.T_timestamptz: return parseTs(parameterStatus.currentLocation, string(s)) case oid.T_timestamp, oid.T_date: @@ -102,11 +120,10 @@ func textDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interfa } return i case oid.T_float4, oid.T_float8: - bits := 64 - if typ == oid.T_float4 { - bits = 32 - } - f, err := strconv.ParseFloat(string(s), bits) + // We always use 64 bit parsing, regardless of whether the input text is for + // a float4 or float8, because clients expect float64s for all float datatypes + // and returning a 32-bit parsed float64 produces lossy results. + f, err := strconv.ParseFloat(string(s), 64) if err != nil { errorf("%s", err) } @@ -188,23 +205,63 @@ func mustParse(f string, typ oid.Oid, s []byte) time.Time { str[len(str)-3] == ':' { f += ":00" } + // Special case for 24:00 time. + // Unfortunately, golang does not parse 24:00 as a proper time. + // In this case, we want to try "round to the next day", to differentiate. + // As such, we find if the 24:00 time matches at the beginning; if so, + // we default it back to 00:00 but add a day later. + var is2400Time bool + switch typ { + case oid.T_timetz, oid.T_time: + if matches := time2400Regex.FindStringSubmatch(str); matches != nil { + // Concatenate timezone information at the back. 
+ str = "00:00:00" + str[len(matches[1]):] + is2400Time = true + } + } t, err := time.Parse(f, str) if err != nil { errorf("decode: %s", err) } + if is2400Time { + t = t.Add(24 * time.Hour) + } return t } -func expect(str, char string, pos int) { - if c := str[pos : pos+1]; c != char { - errorf("expected '%v' at position %v; got '%v'", char, pos, c) +var errInvalidTimestamp = errors.New("invalid timestamp") + +type timestampParser struct { + err error +} + +func (p *timestampParser) expect(str string, char byte, pos int) { + if p.err != nil { + return + } + if pos+1 > len(str) { + p.err = errInvalidTimestamp + return + } + if c := str[pos]; c != char && p.err == nil { + p.err = fmt.Errorf("expected '%v' at position %v; got '%v'", char, pos, c) } } -func mustAtoi(str string) int { - result, err := strconv.Atoi(str) +func (p *timestampParser) mustAtoi(str string, begin int, end int) int { + if p.err != nil { + return 0 + } + if begin < 0 || end < 0 || begin > end || end > len(str) { + p.err = errInvalidTimestamp + return 0 + } + result, err := strconv.Atoi(str[begin:end]) if err != nil { - errorf("expected number; got '%v'", str) + if p.err == nil { + p.err = fmt.Errorf("expected number; got '%v'", str) + } + return 0 } return result } @@ -219,7 +276,7 @@ type locationCache struct { // about 5% speed could be gained by putting the cache in the connection and // losing the mutex, at the cost of a small amount of memory and a somewhat // significant increase in code complexity. -var globalLocationCache *locationCache = newLocationCache() +var globalLocationCache = newLocationCache() func newLocationCache() *locationCache { return &locationCache{cache: make(map[int]*time.Location)} @@ -249,26 +306,26 @@ const ( infinityTsNegativeMustBeSmaller = "pq: infinity timestamp: negative value must be smaller (before) than positive" ) -/* - * If EnableInfinityTs is not called, "-infinity" and "infinity" will return - * []byte("-infinity") and []byte("infinity") respectively, and potentially - * cause error "sql: Scan error on column index 0: unsupported driver -> Scan pair: []uint8 -> *time.Time", - * when scanning into a time.Time value. - * - * Once EnableInfinityTs has been called, all connections created using this - * driver will decode Postgres' "-infinity" and "infinity" for "timestamp", - * "timestamp with time zone" and "date" types to the predefined minimum and - * maximum times, respectively. When encoding time.Time values, any time which - * equals or preceeds the predefined minimum time will be encoded to - * "-infinity". Any values at or past the maximum time will similarly be - * encoded to "infinity". - * - * - * If EnableInfinityTs is called with negative >= positive, it will panic. - * Calling EnableInfinityTs after a connection has been established results in - * undefined behavior. If EnableInfinityTs is called more than once, it will - * panic. - */ +// EnableInfinityTs controls the handling of Postgres' "-infinity" and +// "infinity" "timestamp"s. +// +// If EnableInfinityTs is not called, "-infinity" and "infinity" will return +// []byte("-infinity") and []byte("infinity") respectively, and potentially +// cause error "sql: Scan error on column index 0: unsupported driver -> Scan +// pair: []uint8 -> *time.Time", when scanning into a time.Time value. 
+// +// Once EnableInfinityTs has been called, all connections created using this +// driver will decode Postgres' "-infinity" and "infinity" for "timestamp", +// "timestamp with time zone" and "date" types to the predefined minimum and +// maximum times, respectively. When encoding time.Time values, any time which +// equals or precedes the predefined minimum time will be encoded to +// "-infinity". Any values at or past the maximum time will similarly be +// encoded to "infinity". +// +// If EnableInfinityTs is called with negative >= positive, it will panic. +// Calling EnableInfinityTs after a connection has been established results in +// undefined behavior. If EnableInfinityTs is called more than once, it will +// panic. func EnableInfinityTs(negative time.Time, positive time.Time) { if infinityTsEnabled { panic(infinityTsEnabledAlready) @@ -305,28 +362,48 @@ func parseTs(currentLocation *time.Location, str string) interface{} { } return []byte(str) } + t, err := ParseTimestamp(currentLocation, str) + if err != nil { + panic(err) + } + return t +} + +// ParseTimestamp parses Postgres' text format. It returns a time.Time in +// currentLocation iff that time's offset agrees with the offset sent from the +// Postgres server. Otherwise, ParseTimestamp returns a time.Time with the +// fixed offset offset provided by the Postgres server. +func ParseTimestamp(currentLocation *time.Location, str string) (time.Time, error) { + p := timestampParser{} monSep := strings.IndexRune(str, '-') // this is Gregorian year, not ISO Year // In Gregorian system, the year 1 BC is followed by AD 1 - year := mustAtoi(str[:monSep]) + year := p.mustAtoi(str, 0, monSep) daySep := monSep + 3 - month := mustAtoi(str[monSep+1 : daySep]) - expect(str, "-", daySep) + month := p.mustAtoi(str, monSep+1, daySep) + p.expect(str, '-', daySep) timeSep := daySep + 3 - day := mustAtoi(str[daySep+1 : timeSep]) + day := p.mustAtoi(str, daySep+1, timeSep) + + minLen := monSep + len("01-01") + 1 + + isBC := strings.HasSuffix(str, " BC") + if isBC { + minLen += 3 + } var hour, minute, second int - if len(str) > monSep+len("01-01")+1 { - expect(str, " ", timeSep) + if len(str) > minLen { + p.expect(str, ' ', timeSep) minSep := timeSep + 3 - expect(str, ":", minSep) - hour = mustAtoi(str[timeSep+1 : minSep]) + p.expect(str, ':', minSep) + hour = p.mustAtoi(str, timeSep+1, minSep) secSep := minSep + 3 - expect(str, ":", secSep) - minute = mustAtoi(str[minSep+1 : secSep]) + p.expect(str, ':', secSep) + minute = p.mustAtoi(str, minSep+1, secSep) secEnd := secSep + 3 - second = mustAtoi(str[secSep+1 : secEnd]) + second = p.mustAtoi(str, secSep+1, secEnd) } remainderIdx := monSep + len("01-01 00:00:00") + 1 // Three optional (but ordered) sections follow: the @@ -337,49 +414,51 @@ func parseTs(currentLocation *time.Location, str string) interface{} { nanoSec := 0 tzOff := 0 - if remainderIdx < len(str) && str[remainderIdx:remainderIdx+1] == "." { + if remainderIdx < len(str) && str[remainderIdx] == '.' 
{ fracStart := remainderIdx + 1 fracOff := strings.IndexAny(str[fracStart:], "-+ ") if fracOff < 0 { fracOff = len(str) - fracStart } - fracSec := mustAtoi(str[fracStart : fracStart+fracOff]) + fracSec := p.mustAtoi(str, fracStart, fracStart+fracOff) nanoSec = fracSec * (1000000000 / int(math.Pow(10, float64(fracOff)))) remainderIdx += fracOff + 1 } - if tzStart := remainderIdx; tzStart < len(str) && (str[tzStart:tzStart+1] == "-" || str[tzStart:tzStart+1] == "+") { + if tzStart := remainderIdx; tzStart < len(str) && (str[tzStart] == '-' || str[tzStart] == '+') { // time zone separator is always '-' or '+' (UTC is +00) var tzSign int - if c := str[tzStart : tzStart+1]; c == "-" { + switch c := str[tzStart]; c { + case '-': tzSign = -1 - } else if c == "+" { + case '+': tzSign = +1 - } else { - errorf("expected '-' or '+' at position %v; got %v", tzStart, c) + default: + return time.Time{}, fmt.Errorf("expected '-' or '+' at position %v; got %v", tzStart, c) } - tzHours := mustAtoi(str[tzStart+1 : tzStart+3]) + tzHours := p.mustAtoi(str, tzStart+1, tzStart+3) remainderIdx += 3 var tzMin, tzSec int - if tzStart+3 < len(str) && str[tzStart+3:tzStart+4] == ":" { - tzMin = mustAtoi(str[tzStart+4 : tzStart+6]) + if remainderIdx < len(str) && str[remainderIdx] == ':' { + tzMin = p.mustAtoi(str, remainderIdx+1, remainderIdx+3) remainderIdx += 3 } - if tzStart+6 < len(str) && str[tzStart+6:tzStart+7] == ":" { - tzSec = mustAtoi(str[tzStart+7 : tzStart+9]) + if remainderIdx < len(str) && str[remainderIdx] == ':' { + tzSec = p.mustAtoi(str, remainderIdx+1, remainderIdx+3) remainderIdx += 3 } tzOff = tzSign * ((tzHours * 60 * 60) + (tzMin * 60) + tzSec) } var isoYear int - if remainderIdx < len(str) && str[remainderIdx:remainderIdx+3] == " BC" { + + if isBC { isoYear = 1 - year remainderIdx += 3 } else { isoYear = year } if remainderIdx < len(str) { - errorf("expected end of input, got %v", str[remainderIdx:]) + return time.Time{}, fmt.Errorf("expected end of input, got %v", str[remainderIdx:]) } t := time.Date(isoYear, time.Month(month), day, hour, minute, second, nanoSec, @@ -396,11 +475,11 @@ func parseTs(currentLocation *time.Location, str string) interface{} { } } - return t + return t, p.err } // formatTs formats t into a format postgres understands. -func formatTs(t time.Time) (b []byte) { +func formatTs(t time.Time) []byte { if infinityTsEnabled { // t <= -infinity : ! (t > -infinity) if !t.After(infinityTsNegative) { @@ -411,6 +490,11 @@ func formatTs(t time.Time) (b []byte) { return []byte("infinity") } } + return FormatTimestamp(t) +} + +// FormatTimestamp formats t into Postgres' text format for timestamps. +func FormatTimestamp(t time.Time) []byte { // Need to send dates before 0001 A.D. with " BC" suffix, instead of the // minus sign preferred by Go. // Beware, "0000" in ISO is "1 BC", "-0001" is "2 BC" and so on @@ -420,10 +504,10 @@ func formatTs(t time.Time) (b []byte) { t = t.AddDate((-t.Year())*2+1, 0, 0) bc = true } - b = []byte(t.Format(time.RFC3339Nano)) + b := []byte(t.Format("2006-01-02 15:04:05.999999999Z07:00")) _, offset := t.Zone() - offset = offset % 60 + offset %= 60 if offset != 0 { // RFC3339Nano already printed the minus sign if offset < 0 { @@ -445,14 +529,14 @@ func formatTs(t time.Time) (b []byte) { // Parse a bytea value received from the server. Both "hex" and the legacy // "escape" format are supported. 
-func parseBytea(s []byte) (result []byte) { +func parseBytea(s []byte) (result []byte, err error) { if len(s) >= 2 && bytes.Equal(s[:2], []byte("\\x")) { // bytea_output = hex s = s[2:] // trim off leading "\\x" result = make([]byte, hex.DecodedLen(len(s))) _, err := hex.Decode(result, s) if err != nil { - errorf("%s", err) + return nil, err } } else { // bytea_output = escape @@ -467,11 +551,11 @@ func parseBytea(s []byte) (result []byte) { // '\\' followed by an octal number if len(s) < 4 { - errorf("invalid bytea sequence %v", s) + return nil, fmt.Errorf("invalid bytea sequence %v", s) } r, err := strconv.ParseInt(string(s[1:4]), 8, 9) if err != nil { - errorf("could not parse bytea value: %s", err.Error()) + return nil, fmt.Errorf("could not parse bytea value: %s", err.Error()) } result = append(result, byte(r)) s = s[4:] @@ -489,7 +573,7 @@ func parseBytea(s []byte) (result []byte) { } } - return result + return result, nil } func encodeBytea(serverVersion int, v []byte) (result []byte) { diff --git a/vendor/github.com/lib/pq/error.go b/vendor/github.com/lib/pq/error.go index b4bb44cee..c19c349f1 100644 --- a/vendor/github.com/lib/pq/error.go +++ b/vendor/github.com/lib/pq/error.go @@ -153,6 +153,7 @@ var errorCodeNames = map[ErrorCode]string{ "22004": "null_value_not_allowed", "22002": "null_value_no_indicator_parameter", "22003": "numeric_value_out_of_range", + "2200H": "sequence_generator_limit_exceeded", "22026": "string_data_length_mismatch", "22001": "string_data_right_truncation", "22011": "substring_error", @@ -459,6 +460,11 @@ func errorf(s string, args ...interface{}) { panic(fmt.Errorf("pq: %s", fmt.Sprintf(s, args...))) } +// TODO(ainar-g) Rename to errorf after removing panics. +func fmterrorf(s string, args ...interface{}) error { + return fmt.Errorf("pq: %s", fmt.Sprintf(s, args...)) +} + func errRecoverNoErrBadConn(err *error) { e := recover() if e == nil { @@ -472,13 +478,13 @@ func errRecoverNoErrBadConn(err *error) { } } -func (c *conn) errRecover(err *error) { +func (cn *conn) errRecover(err *error) { e := recover() switch v := e.(type) { case nil: // Do nothing case runtime.Error: - c.bad = true + cn.setBad() panic(v) case *Error: if v.Fatal() { @@ -487,6 +493,10 @@ func (c *conn) errRecover(err *error) { *err = v } case *net.OpError: + cn.setBad() + *err = v + case *safeRetryError: + cn.setBad() *err = driver.ErrBadConn case error: if v == io.EOF || v.(error).Error() == "remote error: handshake failure" { @@ -496,13 +506,13 @@ func (c *conn) errRecover(err *error) { } default: - c.bad = true + cn.setBad() panic(fmt.Sprintf("unknown error: %#v", e)) } // Any time we return ErrBadConn, we need to remember it since *Tx doesn't // mark the connection bad in database/sql. if *err == driver.ErrBadConn { - c.bad = true + cn.setBad() } } diff --git a/vendor/github.com/lib/pq/go.mod b/vendor/github.com/lib/pq/go.mod new file mode 100644 index 000000000..b5a5639ab --- /dev/null +++ b/vendor/github.com/lib/pq/go.mod @@ -0,0 +1,3 @@ +module github.com/lib/pq + +go 1.13 diff --git a/vendor/github.com/lib/pq/krb.go b/vendor/github.com/lib/pq/krb.go new file mode 100644 index 000000000..408ec01f9 --- /dev/null +++ b/vendor/github.com/lib/pq/krb.go @@ -0,0 +1,27 @@ +package pq + +// NewGSSFunc creates a GSS authentication provider, for use with +// RegisterGSSProvider. +type NewGSSFunc func() (GSS, error) + +var newGss NewGSSFunc + +// RegisterGSSProvider registers a GSS authentication provider. 
For example, if +// you need to use Kerberos to authenticate with your server, add this to your +// main package: +// +// import "github.com/lib/pq/auth/kerberos" +// +// func init() { +// pq.RegisterGSSProvider(func() (pq.GSS, error) { return kerberos.NewGSS() }) +// } +func RegisterGSSProvider(newGssArg NewGSSFunc) { + newGss = newGssArg +} + +// GSS provides GSSAPI authentication (e.g., Kerberos). +type GSS interface { + GetInitToken(host string, service string) ([]byte, error) + GetInitTokenFromSpn(spn string) ([]byte, error) + Continue(inToken []byte) (done bool, outToken []byte, err error) +} diff --git a/vendor/github.com/lib/pq/notice.go b/vendor/github.com/lib/pq/notice.go new file mode 100644 index 000000000..01dd8c723 --- /dev/null +++ b/vendor/github.com/lib/pq/notice.go @@ -0,0 +1,71 @@ +// +build go1.10 + +package pq + +import ( + "context" + "database/sql/driver" +) + +// NoticeHandler returns the notice handler on the given connection, if any. A +// runtime panic occurs if c is not a pq connection. This is rarely used +// directly, use ConnectorNoticeHandler and ConnectorWithNoticeHandler instead. +func NoticeHandler(c driver.Conn) func(*Error) { + return c.(*conn).noticeHandler +} + +// SetNoticeHandler sets the given notice handler on the given connection. A +// runtime panic occurs if c is not a pq connection. A nil handler may be used +// to unset it. This is rarely used directly, use ConnectorNoticeHandler and +// ConnectorWithNoticeHandler instead. +// +// Note: Notice handlers are executed synchronously by pq meaning commands +// won't continue to be processed until the handler returns. +func SetNoticeHandler(c driver.Conn, handler func(*Error)) { + c.(*conn).noticeHandler = handler +} + +// NoticeHandlerConnector wraps a regular connector and sets a notice handler +// on it. +type NoticeHandlerConnector struct { + driver.Connector + noticeHandler func(*Error) +} + +// Connect calls the underlying connector's connect method and then sets the +// notice handler. +func (n *NoticeHandlerConnector) Connect(ctx context.Context) (driver.Conn, error) { + c, err := n.Connector.Connect(ctx) + if err == nil { + SetNoticeHandler(c, n.noticeHandler) + } + return c, err +} + +// ConnectorNoticeHandler returns the currently set notice handler, if any. If +// the given connector is not a result of ConnectorWithNoticeHandler, nil is +// returned. +func ConnectorNoticeHandler(c driver.Connector) func(*Error) { + if c, ok := c.(*NoticeHandlerConnector); ok { + return c.noticeHandler + } + return nil +} + +// ConnectorWithNoticeHandler creates or sets the given handler for the given +// connector. If the given connector is a result of calling this function +// previously, it is simply set on the given connector and returned. Otherwise, +// this returns a new connector wrapping the given one and setting the notice +// handler. A nil notice handler may be used to unset it. +// +// The returned connector is intended to be used with database/sql.OpenDB. +// +// Note: Notice handlers are executed synchronously by pq meaning commands +// won't continue to be processed until the handler returns. 
+func ConnectorWithNoticeHandler(c driver.Connector, handler func(*Error)) *NoticeHandlerConnector { + if c, ok := c.(*NoticeHandlerConnector); ok { + c.noticeHandler = handler + return c + } + return &NoticeHandlerConnector{Connector: c, noticeHandler: handler} +} diff --git a/vendor/github.com/lib/pq/notify.go b/vendor/github.com/lib/pq/notify.go index 8cad57815..5c421fdb8 100644 --- a/vendor/github.com/lib/pq/notify.go +++ b/vendor/github.com/lib/pq/notify.go @@ -4,6 +4,8 @@ package pq // This module contains support for Postgres LISTEN/NOTIFY. import ( + "context" + "database/sql/driver" "errors" "fmt" "sync" @@ -29,6 +31,61 @@ func recvNotification(r *readBuf) *Notification { return &Notification{bePid, channel, extra} } +// SetNotificationHandler sets the given notification handler on the given +// connection. A runtime panic occurs if c is not a pq connection. A nil handler +// may be used to unset it. +// +// Note: Notification handlers are executed synchronously by pq meaning commands +// won't continue to be processed until the handler returns. +func SetNotificationHandler(c driver.Conn, handler func(*Notification)) { + c.(*conn).notificationHandler = handler +} + +// NotificationHandlerConnector wraps a regular connector and sets a notification handler +// on it. +type NotificationHandlerConnector struct { + driver.Connector + notificationHandler func(*Notification) +} + +// Connect calls the underlying connector's connect method and then sets the +// notification handler. +func (n *NotificationHandlerConnector) Connect(ctx context.Context) (driver.Conn, error) { + c, err := n.Connector.Connect(ctx) + if err == nil { + SetNotificationHandler(c, n.notificationHandler) + } + return c, err +} + +// ConnectorNotificationHandler returns the currently set notification handler, if any. If +// the given connector is not a result of ConnectorWithNotificationHandler, nil is +// returned. +func ConnectorNotificationHandler(c driver.Connector) func(*Notification) { + if c, ok := c.(*NotificationHandlerConnector); ok { + return c.notificationHandler + } + return nil +} + +// ConnectorWithNotificationHandler creates or sets the given handler for the given +// connector. If the given connector is a result of calling this function +// previously, it is simply set on the given connector and returned. Otherwise, +// this returns a new connector wrapping the given one and setting the notification +// handler. A nil notification handler may be used to unset it. +// +// The returned connector is intended to be used with database/sql.OpenDB. +// +// Note: Notification handlers are executed synchronously by pq meaning commands +// won't continue to be processed until the handler returns. +func ConnectorWithNotificationHandler(c driver.Connector, handler func(*Notification)) *NotificationHandlerConnector { + if c, ok := c.(*NotificationHandlerConnector); ok { + c.notificationHandler = handler + return c + } + return &NotificationHandlerConnector{Connector: c, notificationHandler: handler} +} + const ( connStateIdle int32 = iota connStateExpectResponse @@ -60,16 +117,20 @@ type ListenerConn struct { replyChan chan message } -// Creates a new ListenerConn. Use NewListener instead. +// NewListenerConn creates a new ListenerConn. Use NewListener instead. 
func NewListenerConn(name string, notificationChan chan<- *Notification) (*ListenerConn, error) { - cn, err := Open(name) + return newDialListenerConn(defaultDialer{}, name, notificationChan) +} + +func newDialListenerConn(d Dialer, name string, c chan<- *Notification) (*ListenerConn, error) { + cn, err := DialOpen(d, name) if err != nil { return nil, err } l := &ListenerConn{ cn: cn.(*conn), - notificationChan: notificationChan, + notificationChan: c, connState: connStateIdle, replyChan: make(chan message, 2), } @@ -170,8 +231,12 @@ func (l *ListenerConn) listenerConnLoop() (err error) { } l.replyChan <- message{t, nil} - case 'N', 'S': + case 'S': // ignore + case 'N': + if n := l.cn.noticeHandler; n != nil { + n(parseError(r)) + } default: return fmt.Errorf("unexpected message %q from server in listenerConnLoop", t) } @@ -210,17 +275,17 @@ func (l *ListenerConn) listenerConnMain() { // this ListenerConn is done } -// Send a LISTEN query to the server. See ExecSimpleQuery. +// Listen sends a LISTEN query to the server. See ExecSimpleQuery. func (l *ListenerConn) Listen(channel string) (bool, error) { return l.ExecSimpleQuery("LISTEN " + QuoteIdentifier(channel)) } -// Send an UNLISTEN query to the server. See ExecSimpleQuery. +// Unlisten sends an UNLISTEN query to the server. See ExecSimpleQuery. func (l *ListenerConn) Unlisten(channel string) (bool, error) { return l.ExecSimpleQuery("UNLISTEN " + QuoteIdentifier(channel)) } -// Send `UNLISTEN *` to the server. See ExecSimpleQuery. +// UnlistenAll sends an `UNLISTEN *` query to the server. See ExecSimpleQuery. func (l *ListenerConn) UnlistenAll() (bool, error) { return l.ExecSimpleQuery("UNLISTEN *") } @@ -263,8 +328,8 @@ func (l *ListenerConn) sendSimpleQuery(q string) (err error) { return nil } -// Execute a "simple query" (i.e. one with no bindable parameters) on the -// connection. The possible return values are: +// ExecSimpleQuery executes a "simple query" (i.e. one with no bindable +// parameters) on the connection. The possible return values are: // 1) "executed" is true; the query was executed to completion on the // database server. If the query failed, err will be set to the error // returned by the database, otherwise err will be nil. @@ -329,6 +394,7 @@ func (l *ListenerConn) ExecSimpleQuery(q string) (executed bool, err error) { } } +// Close closes the connection. func (l *ListenerConn) Close() error { l.connectionLock.Lock() if l.err != nil { @@ -342,7 +408,7 @@ func (l *ListenerConn) Close() error { return l.cn.c.Close() } -// Err() returns the reason the connection was closed. It is not safe to call +// Err returns the reason the connection was closed. It is not safe to call // this function until l.Notify has been closed. func (l *ListenerConn) Err() error { return l.err @@ -350,32 +416,43 @@ func (l *ListenerConn) Err() error { var errListenerClosed = errors.New("pq: Listener has been closed") +// ErrChannelAlreadyOpen is returned from Listen when a channel is already +// open. var ErrChannelAlreadyOpen = errors.New("pq: channel is already open") + +// ErrChannelNotOpen is returned from Unlisten when a channel is not open. var ErrChannelNotOpen = errors.New("pq: channel is not open") +// ListenerEventType is an enumeration of listener event types. type ListenerEventType int const ( - // Emitted only when the database connection has been initially - // initialized. err will always be nil. + // ListenerEventConnected is emitted only when the database connection + // has been initially initialized. 
The err argument of the callback + // will always be nil. ListenerEventConnected ListenerEventType = iota - // Emitted after a database connection has been lost, either because of an - // error or because Close has been called. err will be set to the reason - // the database connection was lost. + // ListenerEventDisconnected is emitted after a database connection has + // been lost, either because of an error or because Close has been + // called. The err argument will be set to the reason the database + // connection was lost. ListenerEventDisconnected - // Emitted after a database connection has been re-established after - // connection loss. err will always be nil. After this event has been - // emitted, a nil pq.Notification is sent on the Listener.Notify channel. + // ListenerEventReconnected is emitted after a database connection has + // been re-established after connection loss. The err argument of the + // callback will always be nil. After this event has been emitted, a + // nil pq.Notification is sent on the Listener.Notify channel. ListenerEventReconnected - // Emitted after a connection to the database was attempted, but failed. - // err will be set to an error describing why the connection attempt did - // not succeed. + // ListenerEventConnectionAttemptFailed is emitted after a connection + // to the database was attempted, but failed. The err argument will be + // set to an error describing why the connection attempt did not + // succeed. ListenerEventConnectionAttemptFailed ) +// EventCallbackType is the event callback type. See also ListenerEventType +// constants' documentation. type EventCallbackType func(event ListenerEventType, err error) // Listener provides an interface for listening to notifications from a @@ -391,6 +468,7 @@ type Listener struct { name string minReconnectInterval time.Duration maxReconnectInterval time.Duration + dialer Dialer eventCallback EventCallbackType lock sync.Mutex @@ -421,10 +499,21 @@ func NewListener(name string, minReconnectInterval time.Duration, maxReconnectInterval time.Duration, eventCallback EventCallbackType) *Listener { + return NewDialListener(defaultDialer{}, name, minReconnectInterval, maxReconnectInterval, eventCallback) +} + +// NewDialListener is like NewListener but it takes a Dialer. +func NewDialListener(d Dialer, + name string, + minReconnectInterval time.Duration, + maxReconnectInterval time.Duration, + eventCallback EventCallbackType) *Listener { + l := &Listener{ name: name, minReconnectInterval: minReconnectInterval, maxReconnectInterval: maxReconnectInterval, + dialer: d, eventCallback: eventCallback, channels: make(map[string]struct{}), @@ -438,9 +527,9 @@ func NewListener(name string, return l } -// Returns the notification channel for this listener. This is the same -// channel as Notify, and will not be recreated during the life time of the -// Listener. +// NotificationChannel returns the notification channel for this listener. +// This is the same channel as Notify, and will not be recreated during the +// life time of the Listener. func (l *Listener) NotificationChannel() <-chan *Notification { return l.Notify } @@ -609,7 +698,7 @@ func (l *Listener) disconnectCleanup() error { // after the connection has been established. 
func (l *Listener) resync(cn *ListenerConn, notificationChan <-chan *Notification) error { doneChan := make(chan error) - go func() { + go func(notificationChan <-chan *Notification) { for channel := range l.channels { // If we got a response, return that error to our caller as it's // going to be more descriptive than cn.Err(). @@ -623,14 +712,14 @@ func (l *Listener) resync(cn *ListenerConn, notificationChan <-chan *Notificatio // close and then return the error message from the connection, as // per ListenerConn's interface. if err != nil { - for _ = range notificationChan { + for range notificationChan { } doneChan <- cn.Err() return } } doneChan <- nil - }() + }(notificationChan) // Ignore notifications while synchronization is going on to avoid // deadlocks. We have to send a nil notification over Notify anyway as @@ -660,7 +749,7 @@ func (l *Listener) closed() bool { func (l *Listener) connect() error { notificationChan := make(chan *Notification, 32) - cn, err := NewListenerConn(l.name, notificationChan) + cn, err := newDialListenerConn(l.dialer, l.name, notificationChan) if err != nil { return err } @@ -697,6 +786,9 @@ func (l *Listener) Close() error { } l.isClosed = true + // Unblock calls to Listen() + l.reconnectCond.Broadcast() + return nil } @@ -756,7 +848,7 @@ func (l *Listener) listenerConnLoop() { } l.emitEvent(ListenerEventDisconnected, err) - time.Sleep(nextReconnect.Sub(time.Now())) + time.Sleep(time.Until(nextReconnect)) } } diff --git a/vendor/github.com/lib/pq/oid/types.go b/vendor/github.com/lib/pq/oid/types.go index 03df05a61..ecc84c2c8 100644 --- a/vendor/github.com/lib/pq/oid/types.go +++ b/vendor/github.com/lib/pq/oid/types.go @@ -1,4 +1,4 @@ -// generated by 'go run gen.go'; do not edit +// Code generated by gen.go. DO NOT EDIT. 
package oid @@ -18,6 +18,7 @@ const ( T_xid Oid = 28 T_cid Oid = 29 T_oidvector Oid = 30 + T_pg_ddl_command Oid = 32 T_pg_type Oid = 71 T_pg_attribute Oid = 75 T_pg_proc Oid = 81 @@ -28,6 +29,7 @@ const ( T_pg_node_tree Oid = 194 T__json Oid = 199 T_smgr Oid = 210 + T_index_am_handler Oid = 325 T_point Oid = 600 T_lseg Oid = 601 T_path Oid = 602 @@ -133,6 +135,9 @@ const ( T__uuid Oid = 2951 T_txid_snapshot Oid = 2970 T_fdw_handler Oid = 3115 + T_pg_lsn Oid = 3220 + T__pg_lsn Oid = 3221 + T_tsm_handler Oid = 3310 T_anyenum Oid = 3500 T_tsvector Oid = 3614 T_tsquery Oid = 3615 @@ -144,6 +149,8 @@ const ( T__regconfig Oid = 3735 T_regdictionary Oid = 3769 T__regdictionary Oid = 3770 + T_jsonb Oid = 3802 + T__jsonb Oid = 3807 T_anyrange Oid = 3831 T_event_trigger Oid = 3838 T_int4range Oid = 3904 @@ -158,4 +165,179 @@ const ( T__daterange Oid = 3913 T_int8range Oid = 3926 T__int8range Oid = 3927 + T_pg_shseclabel Oid = 4066 + T_regnamespace Oid = 4089 + T__regnamespace Oid = 4090 + T_regrole Oid = 4096 + T__regrole Oid = 4097 ) + +var TypeName = map[Oid]string{ + T_bool: "BOOL", + T_bytea: "BYTEA", + T_char: "CHAR", + T_name: "NAME", + T_int8: "INT8", + T_int2: "INT2", + T_int2vector: "INT2VECTOR", + T_int4: "INT4", + T_regproc: "REGPROC", + T_text: "TEXT", + T_oid: "OID", + T_tid: "TID", + T_xid: "XID", + T_cid: "CID", + T_oidvector: "OIDVECTOR", + T_pg_ddl_command: "PG_DDL_COMMAND", + T_pg_type: "PG_TYPE", + T_pg_attribute: "PG_ATTRIBUTE", + T_pg_proc: "PG_PROC", + T_pg_class: "PG_CLASS", + T_json: "JSON", + T_xml: "XML", + T__xml: "_XML", + T_pg_node_tree: "PG_NODE_TREE", + T__json: "_JSON", + T_smgr: "SMGR", + T_index_am_handler: "INDEX_AM_HANDLER", + T_point: "POINT", + T_lseg: "LSEG", + T_path: "PATH", + T_box: "BOX", + T_polygon: "POLYGON", + T_line: "LINE", + T__line: "_LINE", + T_cidr: "CIDR", + T__cidr: "_CIDR", + T_float4: "FLOAT4", + T_float8: "FLOAT8", + T_abstime: "ABSTIME", + T_reltime: "RELTIME", + T_tinterval: "TINTERVAL", + T_unknown: "UNKNOWN", + T_circle: "CIRCLE", + T__circle: "_CIRCLE", + T_money: "MONEY", + T__money: "_MONEY", + T_macaddr: "MACADDR", + T_inet: "INET", + T__bool: "_BOOL", + T__bytea: "_BYTEA", + T__char: "_CHAR", + T__name: "_NAME", + T__int2: "_INT2", + T__int2vector: "_INT2VECTOR", + T__int4: "_INT4", + T__regproc: "_REGPROC", + T__text: "_TEXT", + T__tid: "_TID", + T__xid: "_XID", + T__cid: "_CID", + T__oidvector: "_OIDVECTOR", + T__bpchar: "_BPCHAR", + T__varchar: "_VARCHAR", + T__int8: "_INT8", + T__point: "_POINT", + T__lseg: "_LSEG", + T__path: "_PATH", + T__box: "_BOX", + T__float4: "_FLOAT4", + T__float8: "_FLOAT8", + T__abstime: "_ABSTIME", + T__reltime: "_RELTIME", + T__tinterval: "_TINTERVAL", + T__polygon: "_POLYGON", + T__oid: "_OID", + T_aclitem: "ACLITEM", + T__aclitem: "_ACLITEM", + T__macaddr: "_MACADDR", + T__inet: "_INET", + T_bpchar: "BPCHAR", + T_varchar: "VARCHAR", + T_date: "DATE", + T_time: "TIME", + T_timestamp: "TIMESTAMP", + T__timestamp: "_TIMESTAMP", + T__date: "_DATE", + T__time: "_TIME", + T_timestamptz: "TIMESTAMPTZ", + T__timestamptz: "_TIMESTAMPTZ", + T_interval: "INTERVAL", + T__interval: "_INTERVAL", + T__numeric: "_NUMERIC", + T_pg_database: "PG_DATABASE", + T__cstring: "_CSTRING", + T_timetz: "TIMETZ", + T__timetz: "_TIMETZ", + T_bit: "BIT", + T__bit: "_BIT", + T_varbit: "VARBIT", + T__varbit: "_VARBIT", + T_numeric: "NUMERIC", + T_refcursor: "REFCURSOR", + T__refcursor: "_REFCURSOR", + T_regprocedure: "REGPROCEDURE", + T_regoper: "REGOPER", + T_regoperator: "REGOPERATOR", + T_regclass: "REGCLASS", + T_regtype: 
"REGTYPE", + T__regprocedure: "_REGPROCEDURE", + T__regoper: "_REGOPER", + T__regoperator: "_REGOPERATOR", + T__regclass: "_REGCLASS", + T__regtype: "_REGTYPE", + T_record: "RECORD", + T_cstring: "CSTRING", + T_any: "ANY", + T_anyarray: "ANYARRAY", + T_void: "VOID", + T_trigger: "TRIGGER", + T_language_handler: "LANGUAGE_HANDLER", + T_internal: "INTERNAL", + T_opaque: "OPAQUE", + T_anyelement: "ANYELEMENT", + T__record: "_RECORD", + T_anynonarray: "ANYNONARRAY", + T_pg_authid: "PG_AUTHID", + T_pg_auth_members: "PG_AUTH_MEMBERS", + T__txid_snapshot: "_TXID_SNAPSHOT", + T_uuid: "UUID", + T__uuid: "_UUID", + T_txid_snapshot: "TXID_SNAPSHOT", + T_fdw_handler: "FDW_HANDLER", + T_pg_lsn: "PG_LSN", + T__pg_lsn: "_PG_LSN", + T_tsm_handler: "TSM_HANDLER", + T_anyenum: "ANYENUM", + T_tsvector: "TSVECTOR", + T_tsquery: "TSQUERY", + T_gtsvector: "GTSVECTOR", + T__tsvector: "_TSVECTOR", + T__gtsvector: "_GTSVECTOR", + T__tsquery: "_TSQUERY", + T_regconfig: "REGCONFIG", + T__regconfig: "_REGCONFIG", + T_regdictionary: "REGDICTIONARY", + T__regdictionary: "_REGDICTIONARY", + T_jsonb: "JSONB", + T__jsonb: "_JSONB", + T_anyrange: "ANYRANGE", + T_event_trigger: "EVENT_TRIGGER", + T_int4range: "INT4RANGE", + T__int4range: "_INT4RANGE", + T_numrange: "NUMRANGE", + T__numrange: "_NUMRANGE", + T_tsrange: "TSRANGE", + T__tsrange: "_TSRANGE", + T_tstzrange: "TSTZRANGE", + T__tstzrange: "_TSTZRANGE", + T_daterange: "DATERANGE", + T__daterange: "_DATERANGE", + T_int8range: "INT8RANGE", + T__int8range: "_INT8RANGE", + T_pg_shseclabel: "PG_SHSECLABEL", + T_regnamespace: "REGNAMESPACE", + T__regnamespace: "_REGNAMESPACE", + T_regrole: "REGROLE", + T__regrole: "_REGROLE", +} diff --git a/vendor/github.com/lib/pq/rows.go b/vendor/github.com/lib/pq/rows.go new file mode 100644 index 000000000..c6aa5b9a3 --- /dev/null +++ b/vendor/github.com/lib/pq/rows.go @@ -0,0 +1,93 @@ +package pq + +import ( + "math" + "reflect" + "time" + + "github.com/lib/pq/oid" +) + +const headerSize = 4 + +type fieldDesc struct { + // The object ID of the data type. + OID oid.Oid + // The data type size (see pg_type.typlen). + // Note that negative values denote variable-width types. + Len int + // The type modifier (see pg_attribute.atttypmod). + // The meaning of the modifier is type-specific. 
+ Mod int +} + +func (fd fieldDesc) Type() reflect.Type { + switch fd.OID { + case oid.T_int8: + return reflect.TypeOf(int64(0)) + case oid.T_int4: + return reflect.TypeOf(int32(0)) + case oid.T_int2: + return reflect.TypeOf(int16(0)) + case oid.T_varchar, oid.T_text: + return reflect.TypeOf("") + case oid.T_bool: + return reflect.TypeOf(false) + case oid.T_date, oid.T_time, oid.T_timetz, oid.T_timestamp, oid.T_timestamptz: + return reflect.TypeOf(time.Time{}) + case oid.T_bytea: + return reflect.TypeOf([]byte(nil)) + default: + return reflect.TypeOf(new(interface{})).Elem() + } +} + +func (fd fieldDesc) Name() string { + return oid.TypeName[fd.OID] +} + +func (fd fieldDesc) Length() (length int64, ok bool) { + switch fd.OID { + case oid.T_text, oid.T_bytea: + return math.MaxInt64, true + case oid.T_varchar, oid.T_bpchar: + return int64(fd.Mod - headerSize), true + default: + return 0, false + } +} + +func (fd fieldDesc) PrecisionScale() (precision, scale int64, ok bool) { + switch fd.OID { + case oid.T_numeric, oid.T__numeric: + mod := fd.Mod - headerSize + precision = int64((mod >> 16) & 0xffff) + scale = int64(mod & 0xffff) + return precision, scale, true + default: + return 0, 0, false + } +} + +// ColumnTypeScanType returns the value type that can be used to scan types into. +func (rs *rows) ColumnTypeScanType(index int) reflect.Type { + return rs.colTyps[index].Type() +} + +// ColumnTypeDatabaseTypeName return the database system type name. +func (rs *rows) ColumnTypeDatabaseTypeName(index int) string { + return rs.colTyps[index].Name() +} + +// ColumnTypeLength returns the length of the column type if the column is a +// variable length type. If the column is not a variable length type ok +// should return false. +func (rs *rows) ColumnTypeLength(index int) (length int64, ok bool) { + return rs.colTyps[index].Length() +} + +// ColumnTypePrecisionScale should return the precision and scale for decimal +// types. If not applicable, ok should be false. +func (rs *rows) ColumnTypePrecisionScale(index int) (precision, scale int64, ok bool) { + return rs.colTyps[index].PrecisionScale() +} diff --git a/vendor/github.com/lib/pq/scram/scram.go b/vendor/github.com/lib/pq/scram/scram.go new file mode 100644 index 000000000..477216b60 --- /dev/null +++ b/vendor/github.com/lib/pq/scram/scram.go @@ -0,0 +1,264 @@ +// Copyright (c) 2014 - Gustavo Niemeyer +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package scram implements a SCRAM-{SHA-1,etc} client per RFC5802. +// +// http://tools.ietf.org/html/rfc5802 +// +package scram + +import ( + "bytes" + "crypto/hmac" + "crypto/rand" + "encoding/base64" + "fmt" + "hash" + "strconv" + "strings" +) + +// Client implements a SCRAM-* client (SCRAM-SHA-1, SCRAM-SHA-256, etc). +// +// A Client may be used within a SASL conversation with logic resembling: +// +// var in []byte +// var client = scram.NewClient(sha1.New, user, pass) +// for client.Step(in) { +// out := client.Out() +// // send out to server +// in := serverOut +// } +// if client.Err() != nil { +// // auth failed +// } +// +type Client struct { + newHash func() hash.Hash + + user string + pass string + step int + out bytes.Buffer + err error + + clientNonce []byte + serverNonce []byte + saltedPass []byte + authMsg bytes.Buffer +} + +// NewClient returns a new SCRAM-* client with the provided hash algorithm. +// +// For SCRAM-SHA-256, for example, use: +// +// client := scram.NewClient(sha256.New, user, pass) +// +func NewClient(newHash func() hash.Hash, user, pass string) *Client { + c := &Client{ + newHash: newHash, + user: user, + pass: pass, + } + c.out.Grow(256) + c.authMsg.Grow(256) + return c +} + +// Out returns the data to be sent to the server in the current step. +func (c *Client) Out() []byte { + if c.out.Len() == 0 { + return nil + } + return c.out.Bytes() +} + +// Err returns the error that occurred, or nil if there were no errors. +func (c *Client) Err() error { + return c.err +} + +// SetNonce sets the client nonce to the provided value. +// If not set, the nonce is generated automatically out of crypto/rand on the first step. +func (c *Client) SetNonce(nonce []byte) { + c.clientNonce = nonce +} + +var escaper = strings.NewReplacer("=", "=3D", ",", "=2C") + +// Step processes the incoming data from the server and makes the +// next round of data for the server available via Client.Out. +// Step returns false if there are no errors and more data is +// still expected. 
+func (c *Client) Step(in []byte) bool { + c.out.Reset() + if c.step > 2 || c.err != nil { + return false + } + c.step++ + switch c.step { + case 1: + c.err = c.step1(in) + case 2: + c.err = c.step2(in) + case 3: + c.err = c.step3(in) + } + return c.step > 2 || c.err != nil +} + +func (c *Client) step1(in []byte) error { + if len(c.clientNonce) == 0 { + const nonceLen = 16 + buf := make([]byte, nonceLen+b64.EncodedLen(nonceLen)) + if _, err := rand.Read(buf[:nonceLen]); err != nil { + return fmt.Errorf("cannot read random SCRAM-SHA-256 nonce from operating system: %v", err) + } + c.clientNonce = buf[nonceLen:] + b64.Encode(c.clientNonce, buf[:nonceLen]) + } + c.authMsg.WriteString("n=") + escaper.WriteString(&c.authMsg, c.user) + c.authMsg.WriteString(",r=") + c.authMsg.Write(c.clientNonce) + + c.out.WriteString("n,,") + c.out.Write(c.authMsg.Bytes()) + return nil +} + +var b64 = base64.StdEncoding + +func (c *Client) step2(in []byte) error { + c.authMsg.WriteByte(',') + c.authMsg.Write(in) + + fields := bytes.Split(in, []byte(",")) + if len(fields) != 3 { + return fmt.Errorf("expected 3 fields in first SCRAM-SHA-256 server message, got %d: %q", len(fields), in) + } + if !bytes.HasPrefix(fields[0], []byte("r=")) || len(fields[0]) < 2 { + return fmt.Errorf("server sent an invalid SCRAM-SHA-256 nonce: %q", fields[0]) + } + if !bytes.HasPrefix(fields[1], []byte("s=")) || len(fields[1]) < 6 { + return fmt.Errorf("server sent an invalid SCRAM-SHA-256 salt: %q", fields[1]) + } + if !bytes.HasPrefix(fields[2], []byte("i=")) || len(fields[2]) < 6 { + return fmt.Errorf("server sent an invalid SCRAM-SHA-256 iteration count: %q", fields[2]) + } + + c.serverNonce = fields[0][2:] + if !bytes.HasPrefix(c.serverNonce, c.clientNonce) { + return fmt.Errorf("server SCRAM-SHA-256 nonce is not prefixed by client nonce: got %q, want %q+\"...\"", c.serverNonce, c.clientNonce) + } + + salt := make([]byte, b64.DecodedLen(len(fields[1][2:]))) + n, err := b64.Decode(salt, fields[1][2:]) + if err != nil { + return fmt.Errorf("cannot decode SCRAM-SHA-256 salt sent by server: %q", fields[1]) + } + salt = salt[:n] + iterCount, err := strconv.Atoi(string(fields[2][2:])) + if err != nil { + return fmt.Errorf("server sent an invalid SCRAM-SHA-256 iteration count: %q", fields[2]) + } + c.saltPassword(salt, iterCount) + + c.authMsg.WriteString(",c=biws,r=") + c.authMsg.Write(c.serverNonce) + + c.out.WriteString("c=biws,r=") + c.out.Write(c.serverNonce) + c.out.WriteString(",p=") + c.out.Write(c.clientProof()) + return nil +} + +func (c *Client) step3(in []byte) error { + var isv, ise bool + var fields = bytes.Split(in, []byte(",")) + if len(fields) == 1 { + isv = bytes.HasPrefix(fields[0], []byte("v=")) + ise = bytes.HasPrefix(fields[0], []byte("e=")) + } + if ise { + return fmt.Errorf("SCRAM-SHA-256 authentication error: %s", fields[0][2:]) + } else if !isv { + return fmt.Errorf("unsupported SCRAM-SHA-256 final message from server: %q", in) + } + if !bytes.Equal(c.serverSignature(), fields[0][2:]) { + return fmt.Errorf("cannot authenticate SCRAM-SHA-256 server signature: %q", fields[0][2:]) + } + return nil +} + +func (c *Client) saltPassword(salt []byte, iterCount int) { + mac := hmac.New(c.newHash, []byte(c.pass)) + mac.Write(salt) + mac.Write([]byte{0, 0, 0, 1}) + ui := mac.Sum(nil) + hi := make([]byte, len(ui)) + copy(hi, ui) + for i := 1; i < iterCount; i++ { + mac.Reset() + mac.Write(ui) + mac.Sum(ui[:0]) + for j, b := range ui { + hi[j] ^= b + } + } + c.saltedPass = hi +} + +func (c *Client) clientProof() []byte { + 
mac := hmac.New(c.newHash, c.saltedPass) + mac.Write([]byte("Client Key")) + clientKey := mac.Sum(nil) + hash := c.newHash() + hash.Write(clientKey) + storedKey := hash.Sum(nil) + mac = hmac.New(c.newHash, storedKey) + mac.Write(c.authMsg.Bytes()) + clientProof := mac.Sum(nil) + for i, b := range clientKey { + clientProof[i] ^= b + } + clientProof64 := make([]byte, b64.EncodedLen(len(clientProof))) + b64.Encode(clientProof64, clientProof) + return clientProof64 +} + +func (c *Client) serverSignature() []byte { + mac := hmac.New(c.newHash, c.saltedPass) + mac.Write([]byte("Server Key")) + serverKey := mac.Sum(nil) + + mac = hmac.New(c.newHash, serverKey) + mac.Write(c.authMsg.Bytes()) + serverSignature := mac.Sum(nil) + + encoded := make([]byte, b64.EncodedLen(len(serverSignature))) + b64.Encode(encoded, serverSignature) + return encoded +} diff --git a/vendor/github.com/lib/pq/ssl.go b/vendor/github.com/lib/pq/ssl.go new file mode 100644 index 000000000..d90208455 --- /dev/null +++ b/vendor/github.com/lib/pq/ssl.go @@ -0,0 +1,175 @@ +package pq + +import ( + "crypto/tls" + "crypto/x509" + "io/ioutil" + "net" + "os" + "os/user" + "path/filepath" +) + +// ssl generates a function to upgrade a net.Conn based on the "sslmode" and +// related settings. The function is nil when no upgrade should take place. +func ssl(o values) (func(net.Conn) (net.Conn, error), error) { + verifyCaOnly := false + tlsConf := tls.Config{} + switch mode := o["sslmode"]; mode { + // "require" is the default. + case "", "require": + // We must skip TLS's own verification since it requires full + // verification since Go 1.3. + tlsConf.InsecureSkipVerify = true + + // From http://www.postgresql.org/docs/current/static/libpq-ssl.html: + // + // Note: For backwards compatibility with earlier versions of + // PostgreSQL, if a root CA file exists, the behavior of + // sslmode=require will be the same as that of verify-ca, meaning the + // server certificate is validated against the CA. Relying on this + // behavior is discouraged, and applications that need certificate + // validation should always use verify-ca or verify-full. + if sslrootcert, ok := o["sslrootcert"]; ok { + if _, err := os.Stat(sslrootcert); err == nil { + verifyCaOnly = true + } else { + delete(o, "sslrootcert") + } + } + case "verify-ca": + // We must skip TLS's own verification since it requires full + // verification since Go 1.3. + tlsConf.InsecureSkipVerify = true + verifyCaOnly = true + case "verify-full": + tlsConf.ServerName = o["host"] + case "disable": + return nil, nil + default: + return nil, fmterrorf(`unsupported sslmode %q; only "require" (default), "verify-full", "verify-ca", and "disable" supported`, mode) + } + + err := sslClientCertificates(&tlsConf, o) + if err != nil { + return nil, err + } + err = sslCertificateAuthority(&tlsConf, o) + if err != nil { + return nil, err + } + + // Accept renegotiation requests initiated by the backend. + // + // Renegotiation was deprecated then removed from PostgreSQL 9.5, but + // the default configuration of older versions has it enabled. Redshift + // also initiates renegotiations and cannot be reconfigured. 
+ tlsConf.Renegotiation = tls.RenegotiateFreelyAsClient + + return func(conn net.Conn) (net.Conn, error) { + client := tls.Client(conn, &tlsConf) + if verifyCaOnly { + err := sslVerifyCertificateAuthority(client, &tlsConf) + if err != nil { + return nil, err + } + } + return client, nil + }, nil +} + +// sslClientCertificates adds the certificate specified in the "sslcert" and +// "sslkey" settings, or if they aren't set, from the .postgresql directory +// in the user's home directory. The configured files must exist and have +// the correct permissions. +func sslClientCertificates(tlsConf *tls.Config, o values) error { + // user.Current() might fail when cross-compiling. We have to ignore the + // error and continue without home directory defaults, since we wouldn't + // know from where to load them. + user, _ := user.Current() + + // In libpq, the client certificate is only loaded if the setting is not blank. + // + // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1036-L1037 + sslcert := o["sslcert"] + if len(sslcert) == 0 && user != nil { + sslcert = filepath.Join(user.HomeDir, ".postgresql", "postgresql.crt") + } + // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1045 + if len(sslcert) == 0 { + return nil + } + // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1050:L1054 + if _, err := os.Stat(sslcert); os.IsNotExist(err) { + return nil + } else if err != nil { + return err + } + + // In libpq, the ssl key is only loaded if the setting is not blank. + // + // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1123-L1222 + sslkey := o["sslkey"] + if len(sslkey) == 0 && user != nil { + sslkey = filepath.Join(user.HomeDir, ".postgresql", "postgresql.key") + } + + if len(sslkey) > 0 { + if err := sslKeyPermissions(sslkey); err != nil { + return err + } + } + + cert, err := tls.LoadX509KeyPair(sslcert, sslkey) + if err != nil { + return err + } + + tlsConf.Certificates = []tls.Certificate{cert} + return nil +} + +// sslCertificateAuthority adds the RootCA specified in the "sslrootcert" setting. +func sslCertificateAuthority(tlsConf *tls.Config, o values) error { + // In libpq, the root certificate is only loaded if the setting is not blank. + // + // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L950-L951 + if sslrootcert := o["sslrootcert"]; len(sslrootcert) > 0 { + tlsConf.RootCAs = x509.NewCertPool() + + cert, err := ioutil.ReadFile(sslrootcert) + if err != nil { + return err + } + + if !tlsConf.RootCAs.AppendCertsFromPEM(cert) { + return fmterrorf("couldn't parse pem in sslrootcert") + } + } + + return nil +} + +// sslVerifyCertificateAuthority carries out a TLS handshake to the server and +// verifies the presented certificate against the CA, i.e. the one specified in +// sslrootcert or the system CA if sslrootcert was not specified. 
+func sslVerifyCertificateAuthority(client *tls.Conn, tlsConf *tls.Config) error { + err := client.Handshake() + if err != nil { + return err + } + certs := client.ConnectionState().PeerCertificates + opts := x509.VerifyOptions{ + DNSName: client.ConnectionState().ServerName, + Intermediates: x509.NewCertPool(), + Roots: tlsConf.RootCAs, + } + for i, cert := range certs { + if i == 0 { + continue + } + opts.Intermediates.AddCert(cert) + } + _, err = certs[0].Verify(opts) + return err +} diff --git a/vendor/github.com/lib/pq/ssl_permissions.go b/vendor/github.com/lib/pq/ssl_permissions.go new file mode 100644 index 000000000..3b7c3a2a3 --- /dev/null +++ b/vendor/github.com/lib/pq/ssl_permissions.go @@ -0,0 +1,20 @@ +// +build !windows + +package pq + +import "os" + +// sslKeyPermissions checks the permissions on user-supplied ssl key files. +// The key file should have very little access. +// +// libpq does not check key file permissions on Windows. +func sslKeyPermissions(sslkey string) error { + info, err := os.Stat(sslkey) + if err != nil { + return err + } + if info.Mode().Perm()&0077 != 0 { + return ErrSSLKeyHasWorldPermissions + } + return nil +} diff --git a/vendor/github.com/lib/pq/ssl_windows.go b/vendor/github.com/lib/pq/ssl_windows.go new file mode 100644 index 000000000..5d2c763ce --- /dev/null +++ b/vendor/github.com/lib/pq/ssl_windows.go @@ -0,0 +1,9 @@ +// +build windows + +package pq + +// sslKeyPermissions checks the permissions on user-supplied ssl key files. +// The key file should have very little access. +// +// libpq does not check key file permissions on Windows. +func sslKeyPermissions(string) error { return nil } diff --git a/vendor/github.com/lib/pq/url.go b/vendor/github.com/lib/pq/url.go index 9bac95c48..f4d8a7c20 100644 --- a/vendor/github.com/lib/pq/url.go +++ b/vendor/github.com/lib/pq/url.go @@ -2,6 +2,7 @@ package pq import ( "fmt" + "net" nurl "net/url" "sort" "strings" @@ -54,12 +55,11 @@ func ParseURL(url string) (string, error) { accrue("password", v) } - i := strings.Index(u.Host, ":") - if i < 0 { + if host, port, err := net.SplitHostPort(u.Host); err != nil { accrue("host", u.Host) } else { - accrue("host", u.Host[:i]) - accrue("port", u.Host[i+1:]) + accrue("host", host) + accrue("port", port) } if u.Path != "" { diff --git a/vendor/github.com/lib/pq/user_posix.go b/vendor/github.com/lib/pq/user_posix.go index e937d7d08..a51019205 100644 --- a/vendor/github.com/lib/pq/user_posix.go +++ b/vendor/github.com/lib/pq/user_posix.go @@ -1,6 +1,6 @@ // Package pq is a pure Go Postgres driver for the database/sql package. -// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux nacl netbsd openbsd plan9 solaris rumprun package pq diff --git a/vendor/github.com/lib/pq/uuid.go b/vendor/github.com/lib/pq/uuid.go new file mode 100644 index 000000000..9a1b9e074 --- /dev/null +++ b/vendor/github.com/lib/pq/uuid.go @@ -0,0 +1,23 @@ +package pq + +import ( + "encoding/hex" + "fmt" +) + +// decodeUUIDBinary interprets the binary format of a uuid, returning it in text format. 
+func decodeUUIDBinary(src []byte) ([]byte, error) { + if len(src) != 16 { + return nil, fmt.Errorf("pq: unable to decode uuid; bad length: %d", len(src)) + } + + dst := make([]byte, 36) + dst[8], dst[13], dst[18], dst[23] = '-', '-', '-', '-' + hex.Encode(dst[0:], src[0:4]) + hex.Encode(dst[9:], src[4:6]) + hex.Encode(dst[14:], src[6:8]) + hex.Encode(dst[19:], src[8:10]) + hex.Encode(dst[24:], src[10:16]) + + return dst, nil +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 4dcf4bca1..bab2d5374 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -101,9 +101,10 @@ github.com/konsorten/go-windows-terminal-sequences github.com/kr/pretty # github.com/kr/text v0.2.0 github.com/kr/text -# github.com/lib/pq v0.0.0-20150723085316-0dad96c0b94f +# github.com/lib/pq v1.9.0 github.com/lib/pq github.com/lib/pq/oid +github.com/lib/pq/scram # github.com/magiconair/properties v1.5.3 github.com/magiconair/properties # github.com/mattn/go-sqlite3 v1.6.0
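For reference, the notice-handler connector added in notice.go above is meant to be used with database/sql.OpenDB. A minimal sketch of wiring it up (the DSN is a placeholder, and pq.NewConnector from this release is assumed):

package main

import (
	"database/sql"
	"log"

	"github.com/lib/pq"
)

func main() {
	// Placeholder DSN; adjust for the target environment.
	base, err := pq.NewConnector("postgres://user:pass@localhost/mydb?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}

	// Wrap the connector so server notices (e.g. RAISE NOTICE) invoke the
	// handler. The handler runs synchronously, so it should return quickly.
	connector := pq.ConnectorWithNoticeHandler(base, func(n *pq.Error) {
		log.Printf("server notice: %s", n.Message)
	})

	db := sql.OpenDB(connector)
	defer db.Close()

	if _, err := db.Exec(`DO $$ BEGIN RAISE NOTICE 'hello'; END $$`); err != nil {
		log.Fatal(err)
	}
}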
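The encode.go hunks above export ParseTimestamp and FormatTimestamp for Postgres' text timestamp format. A small illustrative sketch (the literal timestamp is arbitrary):

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/lib/pq"
)

func main() {
	// Parse a timestamp in Postgres' text format; the result keeps the
	// server-supplied fixed offset because it differs from UTC here.
	t, err := pq.ParseTimestamp(time.UTC, "2001-02-03 04:05:06.000007-08")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(t.UTC()) // 2001-02-03 12:05:06.000007 +0000 UTC

	// Format a time the way the driver sends it to the server.
	fmt.Printf("%s\n", pq.FormatTimestamp(t))
}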
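The notify.go hunks above rework the Listener documentation and add NewDialListener for custom Dialers. A hedged usage sketch of the LISTEN/NOTIFY API; the connection string and channel name are placeholders:

package main

import (
	"log"
	"time"

	"github.com/lib/pq"
)

func main() {
	dsn := "postgres://user:pass@localhost/mydb?sslmode=disable" // placeholder

	// The event callback reports connection lifecycle events (see the
	// ListenerEventType constants); errors here are informational.
	listener := pq.NewListener(dsn, 10*time.Second, time.Minute,
		func(ev pq.ListenerEventType, err error) {
			if err != nil {
				log.Printf("listener event %v: %v", ev, err)
			}
		})
	defer listener.Close()

	if err := listener.Listen("jobs"); err != nil {
		log.Fatal(err)
	}

	for n := range listener.Notify {
		// A nil notification is sent after the connection is re-established.
		if n == nil {
			continue
		}
		log.Printf("NOTIFY on %q: %s", n.Channel, n.Extra)
	}
}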